prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
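Each row pairs a truncated code context (`prompt`) with the call that completes it (`completion`) and the fully qualified pandas API that the completion exercises (`api`). Below is a minimal sketch of loading and grouping such rows, assuming a hypothetical `train.parquet` export of the dataset; the file name and the use of pandas here are illustrative, not part of the dataset itself.

```python
import pandas as pd

# Hypothetical export path; adjust to wherever the rows are stored.
df = pd.read_parquet("train.parquet")

# The three string columns described in the header above.
print(df.columns.tolist())                # ['prompt', 'completion', 'api']
print(df["prompt"].str.len().describe())  # prompt lengths, roughly 19 to ~1.03M characters
print(df["completion"].str.len().max())   # completion lengths up to ~2.12k characters

# Group rows by the pandas API the completion calls.
print(df["api"].value_counts().head(10))
```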
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Drawon
# Time:2020/11/6 16:01
# Version:python 3.7.6
import logging
import datetime
import numpy as np
import pandas as pd
from statsmodels.tsa.holtwinters import Holt
import warnings
warnings.filterwarnings('ignore')
def valueForecast(file):
"""
Electricity usage forecast.
:param file: Excel file of electricity usage data,
formatted as: user, date, usage value
:return: forecast usage values
"""
logging.debug('start running')
data = pd.read_excel(file)
if data.shape[0] == 0:
raise ValueError('raw input data does not exist')
data.iloc[:, 0] = data.iloc[:,0].astype(str)
users = set(data.iloc[:,0].values)
# electricity usage forecast
result_pre = pd.DataFrame(columns=['DATA_DATE', 'DATA_DATE1', 'DATA_DATE2', 'DATA_DATE3', 'DATA_DATE4', 'DATA_DATE5'])
for user in users:
subdata = data.loc[data.iloc[:,0]==user]
df_index =
|
pd.MultiIndex.from_frame(subdata.iloc[:, 1:2])
|
pandas.MultiIndex.from_frame
|
import opendp.smartnoise.core as sn
import random
import string
import numpy as np
from tests import (TEST_PUMS_PATH, TEST_PUMS_NAMES,
TEST_EDUC_PATH, TEST_EDUC_NAMES)
def generate_bools():
private_data = [[True, True], [True, False], [False, True], [False, False]]
dataset = sn.literal(value=private_data, value_public=False)
typed = sn.to_bool(dataset, true_label=True)
return sn.resize(typed, number_columns=2, categories=[True, False])
def generate_synthetic(var_type, n=10, rand_min=0, rand_max=10, cats_str=None, cats_num=None, variants=None):
cats_str = ['A', 'B', 'C', 'D'] if cats_str is None else cats_str
cats_num = [0, 1, 2, 3] if cats_num is None else cats_num
variants = ['Index', 'Random', 'Constant', 'Categories'] if variants is None else variants
data = []
names = []
for variant in variants:
if var_type == bool:
data.append(list({
'Index': (bool(i % 2) for i in range(n)),
'Random': (random.choice([True, False]) for _ in range(n)),
'Constant': (bool(1) for _ in range(n)),
'Categories': (bool(random.choice(cats_num)) for _ in range(n))
}[variant]))
names.append('B_' + variant)
if var_type == float:
data.append(list({
'Index': (float(i) for i in range(n)),
'Random': (rand_min + random.random() * (rand_max - rand_min) for _ in range(n)),
'Constant': (float(1) for _ in range(n)),
'Categories': (float(random.choice(cats_num)) for _ in range(n)),
}[variant]))
names.append('F_' + variant)
if var_type == int:
data.append(list({
'Index': range(n),
'Random': (random.randrange(rand_min, rand_max) for _ in range(n)),
'Constant': (1 for _ in range(n)),
'Categories': (random.choice(cats_num) for _ in range(n)),
}[variant]))
names.append('I_' + variant)
if var_type == str:
data.append(list({
'Index': (str(i) for i in range(n)),
'Random': (''.join([random.choice(string.ascii_letters + string.digits)
for n in range(2)]) for _ in range(n)),
'Constant': (str(1) for _ in range(n)),
'Categories': (random.choice(cats_str) for _ in range(n)),
}[variant]))
names.append('S_' + variant)
data = list(zip(*data))
dataset = sn.literal(value=data, value_public=False)
typed = sn.cast(dataset, atomic_type={
bool: 'bool', float: 'float', int: 'int', str: 'str'
}[var_type], true_label=True, lower=0, upper=10)
resized = sn.resize(typed, number_columns=len(variants), lower=0., upper=10.)
return sn.to_dataframe(resized, names=names)
def test_dp_covariance():
# establish data information
var_names = ["age", "sex", "educ", "race", "income", "married"]
with sn.Analysis() as analysis:
wn_data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
# get scalar covariance
age_income_cov_scalar = sn.dp_covariance(
left=sn.to_float(wn_data['age']),
right=sn.to_float(wn_data['income']),
privacy_usage={'epsilon': 5000},
left_lower=0.,
left_upper=100.,
left_rows=1000,
right_lower=0.,
right_upper=500_000.,
right_rows=1000)
data = sn.to_float(wn_data['age', 'income'])
# get full covariance matrix
age_income_cov_matrix = sn.dp_covariance(
data=data,
privacy_usage={'epsilon': 5000},
data_lower=[0., 0.],
data_upper=[100., 500_000.],
data_rows=1000)
# get cross-covariance matrix
cross_covar = sn.dp_covariance(
left=data,
right=data,
privacy_usage={'epsilon': 5000},
left_lower=[0., 0.],
left_upper=[100., 500_000.],
left_rows=1_000,
right_lower=[0., 0.],
right_upper=[100., 500_000.],
right_rows=1000)
analysis.release()
print('scalar covariance:\n{0}\n'.format(age_income_cov_scalar.value))
print('covariance matrix:\n{0}\n'.format(age_income_cov_matrix.value))
print('cross-covariance matrix:\n{0}'.format(cross_covar.value))
def test_dp_linear_regression():
with sn.Analysis():
wn_data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
wn_data = sn.resize(
sn.to_float(wn_data[["age", "income"]]),
number_rows=1000,
lower=[0., 0.],
upper=[100., 500_000.])
dp_linear_regression = sn.dp_linear_regression(
data_x=sn.index(wn_data, indices=0),
data_y=sn.index(wn_data, indices=1),
privacy_usage={'epsilon': 10.},
lower_slope=0., upper_slope=1000.,
lower_intercept=0., upper_intercept=1000.
)
print(dp_linear_regression.value)
def test_divide():
with sn.Analysis():
data_A = generate_synthetic(float, variants=['Random'])
f_random = data_A['F_Random']
imputed = sn.impute(f_random, lower=0., upper=10.)
clamped_nonzero = sn.clamp(imputed, lower=1., upper=10.)
clamped_zero = sn.clamp(imputed, lower=0., upper=10.)
# test properties
assert f_random.nullity
assert not imputed.nullity
assert (2. / imputed).nullity
assert (f_random / imputed).nullity
assert (2. / clamped_zero).nullity
# TODO: fix these assertions in the validator- we should be able to get these tighter bounds
# assert not (2. / clamped_nonzero).nullity
# assert not (imputed / 2.).nullity
def test_dp_mean():
with sn.Analysis():
data = generate_synthetic(float, variants=['Random'])
mean = sn.dp_mean(
data['F_Random'],
privacy_usage={'epsilon': 0.1},
data_lower=0.,
data_upper=10.,
data_rows=10)
print("accuracy", mean.get_accuracy(0.05))
print(mean.from_accuracy(2.3, .05))
with sn.Analysis():
data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
print(sn.dp_mean(sn.to_float(data['income']),
implementation="plug-in",
data_lower=0., data_upper=200_000.,
privacy_usage={"epsilon": 0.5}).value)
with sn.Analysis(protect_sensitivity=False, protect_floating_point=False):
data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
mean = sn.mean(sn.to_float(data['income']), data_lower=0., data_upper=200_000., data_rows=1000)
print(sn.gaussian_mechanism(
mean,
sensitivity=[[0.0000001]],
privacy_usage={"epsilon": 0.5, 'delta': .000001}).value)
def test_dp_median():
with sn.Analysis():
data = generate_synthetic(float, variants=['Random'])
candidates = [-10., -2., 2., 3., 4., 7., 10., 12.]
median_scores = sn.median(
data['F_Random'],
candidates=candidates,
data_rows=10,
data_lower=0.,
data_upper=10.)
dp_median = sn.exponential_mechanism(median_scores, candidates=candidates, privacy_usage={"epsilon": 1.})
print(dp_median.value)
assert sn.dp_median(
data['F_Random'],
privacy_usage={"epsilon": 1.},
candidates=candidates,
data_lower=0.,
data_upper=10.).value is not None
def test_dp_median_raw():
with sn.Analysis() as analysis:
# create a literal data vector, and tag it as private
data = sn.Component.of([float(i) for i in range(20)], public=False)
dp_median = sn.dp_median(
sn.to_float(data),
privacy_usage={"epsilon": 1.},
candidates=[-10., -2., 2., 3., 4., 7., 10., 12.],
data_lower=0.,
data_upper=10.,
data_columns=1).value
print(dp_median)
# analysis.plot()
assert dp_median is not None
def test_median_education():
# import pandas as pd
# print(pd.read_csv(data_path)['value'].median())
with sn.Analysis(filter_level="all") as analysis:
data = sn.Dataset(path=TEST_EDUC_PATH, column_names=TEST_EDUC_NAMES)
candidates = list(map(float, range(1, 200, 2)))
median_scores = sn.median(
sn.impute(sn.to_float(data['value']), 100., 200.),
candidates=candidates)
# print(list(zip(candidates, median_scores.value[0])))
dp_median = sn.exponential_mechanism(
median_scores,
candidates=candidates,
privacy_usage={"epsilon": 100.})
print(dp_median.value)
analysis.release()
def test_equal():
with sn.Analysis(filter_level='all') as analysis:
data = generate_bools()
equality = sn.index(data, indices=0) == sn.index(data, indices=1)
analysis.release()
assert np.array_equal(equality.value, np.array([True, False, False, True]))
def test_partition():
with sn.Analysis(filter_level='all') as analysis:
data = generate_bools()
partitioned = sn.partition(data, num_partitions=3)
analysis.release()
# print(partitioned.value)
assert np.array_equal(partitioned.value[0], np.array([[True, True], [True, False]]))
assert np.array_equal(partitioned.value[1], np.array([[False, True]]))
assert np.array_equal(partitioned.value[2], np.array([[False, False]]))
def test_histogram():
# generate raw data
import numpy as np
import pandas as pd
import tempfile
import os
n = 1000
data = np.random.normal(loc=10, scale=25, size=n)
mean = np.mean(data)
sd = np.std(data)
data =
|
pd.DataFrame([(elem - mean) / sd for elem in data])
|
pandas.DataFrame
|
import pytest
from pandas import DataFrame
import pandas._testing as tm
class TestAssign:
def test_assign(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected["C"] = [4, 2.5, 2]
tm.assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
tm.assert_frame_equal(result, expected)
# original is unmodified
tm.assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
tm.assert_frame_equal(result, expected)
# original is unmodified
tm.assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop("B", axis=1).rename(columns={"C": "B"})
tm.assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected["A"] = [5, 7, 9]
tm.assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
tm.assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=["A", "B"])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame(
[[1, 4, 7, 1, 4], [2, 5, 8, 2, 5], [3, 6, 9, 3, 6]], columns=list("ABCDE")
)
tm.assert_frame_equal(result, expected)
def test_assign_order(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]], columns=list("ABDC"))
tm.assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]], columns=list("ABCD"))
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import logging
import traceback
import six
logger = logging.getLogger(__name__)
try:
import pandas as pd
except ImportError:
_HAS_PANDAS = False
logger.info("Can't import pandas")
else:
_HAS_PANDAS = True
try:
from munch import munchify # noqa
except ImportError:
_HAS_MUNCH = False
logger.info("Can't import munch")
else:
_HAS_MUNCH = True
DATE_FORMATS = {1: "%Y:%m:%d-%H:%M:%S", 2: "%Y/%m/%d %H:%M:%S", 3: "%Y/%m/%d %H:%M:%S"}
def conv_resol(resolution):
"""Returns a string for resolution (from a Pandas)
"""
if _HAS_PANDAS:
from pandas.tseries.frequencies import to_offset
d = {
to_offset("1s"): "SECOND",
to_offset("1Min"): "MINUTE",
|
to_offset("2Min")
|
pandas.tseries.frequencies.to_offset
|
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert not df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start":
|
pd.Int64Dtype()
|
pandas.Int64Dtype
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionally annotate a fasta file.
Write functional predictions as a TSV with columns
- sequence_name (string)
- predicted_label (string)
- confidence (float); number between 0 and 1. An estimate of the model's
confidence that the label is true.
- label_description (string); a human-readable label description.
"""
import decimal
import io
import logging
import os
from typing import Dict, List, Text, Tuple
from absl import app
from absl import flags
from Bio.SeqIO import FastaIO
import numpy as np
import pandas as pd
import inference
import utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # TF c++ logging set to ERROR
import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top
import tqdm
_logger = logging.getLogger('proteinfer')
FLAGS = flags.FLAGS
flags.DEFINE_string('i', None, 'Input fasta file path.')
flags.DEFINE_string('o', None, 'Output write path.')
flags.DEFINE_integer(
'num_ensemble_elements', 1,
'In order to run with more than one ensemble element, you will need to run '
'install_models.py --install_ensemble=true. '
'More ensemble elements takes more time, but tends to be more accurate. '
'Run-time scales linearly with the number of ensemble elements. '
'Maximum value of this flag is {}.'.format(
utils.MAX_NUM_ENSEMBLE_ELS_FOR_INFERENCE))
flags.DEFINE_float(
'reporting_threshold',
.5,
'Number between 0 (exclusive) and 1 (inclusive). Predicted labels with '
'confidence at least reporting_threshold will be included in the output.',
lower_bound=0.,
upper_bound=1.)
flags.DEFINE_string('model_cache_path', 'cached_models',
'Path from which to use downloaded models and metadata.')
# A list of inferrers that all have the same label set.
_InferrerEnsemble = List[inference.Inferrer]
# (list_of_pfam_inferrers, list_of_ec_inferrers, list_of_go_inferrers)
_Models = Tuple[_InferrerEnsemble, _InferrerEnsemble, _InferrerEnsemble]
def _num_decimal_places(f):
"""Get the number of decimal places in a float."""
# https://stackoverflow.com/a/6190291/1445296
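# For example, _num_decimal_places(0.25) == 2 and _num_decimal_places(3.0) == 1.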
return abs(decimal.Decimal('{}'.format(f)).as_tuple().exponent)
def _gcs_path_to_relative_unzipped_path(p):
"""Parses GCS path, to gets the last part, and removes .tar.gz."""
return os.path.join(
os.path.basename(os.path.normpath(p)).replace('.tar.gz', ''))
def _get_inferrer_paths(model_urls,
model_cache_path):
"""Convert list of model GCS urls to a list of locally cached paths."""
return [
os.path.join(model_cache_path, _gcs_path_to_relative_unzipped_path(p))
for p in model_urls
]
def load_models(model_cache_path, num_ensemble_elements):
"""Load models from cache path into inferrerLists.
Args:
model_cache_path: path that contains downloaded SavedModels and associated
metadata. Same path that was used when installing the models via
install_models.
num_ensemble_elements: number of ensemble elements of each type to load.
Returns:
(list_of_pfam_inferrers, list_of_ec_inferrers, list_of_go_inferrers)
Raises:
ValueError if the models were not found. The exception message describes
that install_models.py needs to be rerun.
"""
try:
pfam_inferrer_paths = _get_inferrer_paths(utils.OSS_PFAM_ZIPPED_MODELS_URLS,
model_cache_path)
ec_inferrer_paths = _get_inferrer_paths(utils.OSS_EC_ZIPPED_MODELS_URLS,
model_cache_path)
go_inferrer_paths = _get_inferrer_paths(utils.OSS_GO_ZIPPED_MODELS_URLS,
model_cache_path)
to_return = []
inferrer_list_paths_for_all_models = [
pfam_inferrer_paths, ec_inferrer_paths, go_inferrer_paths
]
pbar = tqdm.tqdm(
desc='Loading models',
position=0,
total=len(inferrer_list_paths_for_all_models) * num_ensemble_elements,
leave=True,
dynamic_ncols=True)
for inferrer_list_paths in inferrer_list_paths_for_all_models:
inner_itr = inferrer_list_paths[:num_ensemble_elements]
inferrer_list = []
for p in inner_itr:
inferrer_list.append(inference.Inferrer(p, use_tqdm=True))
pbar.update()
to_return.append(inferrer_list)
pfam_inferrers = to_return[0]
ec_inferrers = to_return[1]
go_inferrers = to_return[2]
return pfam_inferrers, ec_inferrers, go_inferrers
except tf.errors.NotFoundError as exc:
err_msg = 'Unable to find cached models in {}.'.format(model_cache_path)
if num_ensemble_elements > 1:
err_msg += (
' Make sure you have installed the entire ensemble of models by '
'running\n install_models.py --install_ensemble '
'--model_cache_path={}'.format(model_cache_path))
else:
err_msg += (
' Make sure you have installed the models by running\n '
'install_models.py --model_cache_path={}'.format(model_cache_path))
err_msg += '\nThen try rerunning this script.'
raise ValueError(err_msg, exc)
def _assert_fasta_parsable(input_text):
with io.StringIO(initial_value=input_text) as f:
fasta_itr = FastaIO.FastaIterator(f)
end_iteration_sentinel = object()
# Avoid parsing the entire FASTA contents by using `next`.
# A malformed FASTA file will have no entries in its FastaIterator.
# This is unfortunate (instead of it throwing an error).
if next(fasta_itr, end_iteration_sentinel) is end_iteration_sentinel:
raise ValueError('Failed to parse any input from fasta file. '
'Consider checking the formatting of your fasta file. '
'First bit of contents from the fasta file was\n'
'{}'.format(input_text.splitlines()[:3]))
def parse_input_to_text(input_fasta_path):
"""Parses input fasta file.
Args:
input_fasta_path: path to FASTA file.
Returns:
Contents of file as a string.
Raises:
ValueError if parsing the FASTA file gives no records.
"""
_logger.info('Parsing input from %s', input_fasta_path)
with tf.io.gfile.GFile(input_fasta_path, 'r') as input_file:
input_text = input_file.read()
_assert_fasta_parsable(input_text=input_text)
return input_text
def input_text_to_df(input_text):
"""Converts fasta contents to a df with columns sequence_name and sequence."""
with io.StringIO(initial_value=input_text) as f:
fasta_records = list(FastaIO.FastaIterator(f))
fasta_df = pd.DataFrame([(f.name, str(f.seq)) for f in fasta_records],
columns=['sequence_name', 'sequence'])
return fasta_df
def perform_inference(input_df, models,
reporting_threshold):
"""Perform inference for Pfam, EC, and GO using given models.
Args:
input_df: pd.DataFrame with columns sequence_name (str) and sequence (str).
models: (list_of_pfam_inferrers, list_of_ec_inferrers,
list_of_go_inferrers).
reporting_threshold: report labels with mean confidence across ensemble
elements that exceeds this threshold.
Returns:
pd.DataFrame with columns sequence_name (str), label (str), confidence
(float).
"""
predictions = []
for inferrer_list in tqdm.tqdm(
models, position=1, desc='Progress', leave=True):
predictions.append(
inference.get_preds_at_or_above_threshold(input_df, inferrer_list,
reporting_threshold))
print('\n') # Because the tqdm bar is position 1, we need to print a newline.
predictions = pd.concat(predictions)
return predictions
def _sort_df_multiple_columns(df, key):
"""Sort df based on callable key.
Args:
df: pd.DataFrame.
key: function from rows of df (namedtuples) to tuple. This is used in the
builtin `sorted` method as the key.
Returns:
A sorted copy of df.
"""
# Unpack into list to take advantage of builtin sorted function.
# Note that pd.DataFrame.sort_values will not work because sort_values'
# sorting function is applied to each column at a time, whereas we need to
# consider multiple fields at once.
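# Illustrative key over the prediction columns (sequence_name, confidence):
# key=lambda row: (-row.confidence, row.sequence_name) sorts by descending
# confidence, then by name.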
df_rows_sorted = sorted(df.itertuples(index=False), key=key)
return
|
pd.DataFrame(df_rows_sorted, columns=df.columns)
|
pandas.DataFrame
|
import networkx as nx
import os
import osmnx as ox
import pandas as pd
import pickle
import numpy as np
import random
import statistics
from pathlib import Path
from instance_class import Instance
from output_files import JsonConverter
from shapely.geometry import Polygon
from descartes.patch import PolygonPatch
from shapely.geometry import Point
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
plt.style.use('ggplot')
from matplotlib.colors import LogNorm
from fitter import Fitter, get_common_distributions, get_distributions
from multiprocessing import cpu_count
import ray
import gc
@ray.remote
def compute_distances(network, idx, origin, destination):
dist = network._return_estimated_distance_drive(origin, destination)
print(idx)
tuple_re = (idx, dist)
return tuple_re
def geographic_dispersion(inst, inst1, problem, filename1):
if problem == 'DARP':
#mu
#average travel time between origin and destinations
dtt = []
for idx, req in inst1.iterrows():
dtt.append(req['direct_travel_time'])
mudarp = sum(dtt) / len(dtt)
mu2 = inst1['direct_travel_time'].mean()
#average travel time between x nearest neighbors
#nyc -> compute for the 5 nearest zones
earliest_departure = 'earliest_departure'
#latest_arrival = 'do_time_sec'
time_gap = 600
#node_origin =
#node_destination =
osmid_origin = 'originnode_drive'
osmid_destination = 'destinationnode_drive'
speed = 7.22 #26kmh
sumnn = 0
for idx1, row1 in inst1.iterrows():
ltro = []
ltrd = []
for idx2, row2 in inst1.iterrows():
if idx2 != idx1:
latest_arrival1 = row1[earliest_departure] + row1['direct_travel_time']
latest_arrival2 = row2[earliest_departure] + row2['direct_travel_time']
#print(row2['earliest_departure'])
if (row2[earliest_departure] >= row1[earliest_departure] - time_gap) and (row2[earliest_departure] <= row1[earliest_departure] + time_gap):
#if (row2['originnode_drive'] != row1['originnode_drive']) and (row2['originnode_drive'] != row1['destinationnode_drive']):
#ltro.append(row2['originnode_drive'])
#origin_point = (row2['Pickup_Centroid_Latitude'], row2['Pickup_Centroid_Longitude'])
node_origin = row2[osmid_origin]
ltro.append(node_origin)
if (latest_arrival2 >= row1[earliest_departure] - time_gap) and (latest_arrival2 <= row1[earliest_departure] + time_gap):
#if (row2['destinationnode_drive'] != row1['originnode_drive']) and (row2['destinationnode_drive'] != row1['destinationnode_drive']):
#ltro.append(row2['destinationnode_drive'])
#destination_point = (row2['Dropoff_Centroid_Latitude'], row2['Dropoff_Centroid_Longitude'])
node_destination = row2[osmid_destination]
ltro.append(node_destination)
if (latest_arrival2 >= latest_arrival1 - time_gap) and (latest_arrival2 <= latest_arrival1 + time_gap):
#if (row2['destinationnode_drive'] != row1['originnode_drive']) and (row2['destinationnode_drive'] != row1['destinationnode_drive']):
#ltrd.append(row2['destinationnode_drive'])
#destination_point = (row2['Dropoff_Centroid_Latitude'], row2['Dropoff_Centroid_Longitude'])
node_destination = row2[osmid_destination]
ltrd.append(node_destination)
if (row2[earliest_departure] >= latest_arrival1 - time_gap) and (row2[earliest_departure] <= latest_arrival1 + time_gap):
#if (row2['originnode_drive'] != row1['originnode_drive']) and (row2['originnode_drive'] != row1['destinationnode_drive']):
#ltrd.append(row2['originnode_drive'])
#origin_point = (row2['Pickup_Centroid_Latitude'], row2['Pickup_Centroid_Longitude'])
node_origin = row2[osmid_origin]
ltrd.append(node_origin)
#ltro = list(dict.fromkeys(ltro))
#ltrd = list(dict.fromkeys(ltrd))
#print(ltro)
#print(ltrd)
ltrot = []
ltrdt = []
#org_row1 = int(row1['originnode_drive'])
#origin_point = (row1['Pickup_Centroid_Latitude'], row1['Pickup_Centroid_Longitude'])
#org_row1 = ox.get_nearest_node(inst.network.G_drive, origin_point)
org_row1 = row1[osmid_origin]
for x in ltro:
#tuplx = (x, inst.network._return_estimated_travel_time_drive(int(org_row1), int(x)))
dist = inst.network._return_estimated_distance_drive(int(org_row1), int(x))
tt = dist/speed
tuplx = (x, tt)
ltrot.append(tuplx)
#dest_row1 = int(row1['destinationnode_drive'])
#destination_point = (row1['Dropoff_Centroid_Latitude'], row1['Dropoff_Centroid_Longitude'])
#dest_row1 = ox.get_nearest_node(inst.network.G_drive, destination_point)
dest_row1 = row1[osmid_destination]
for y in ltrd:
#tuply = (y, inst.network._return_estimated_travel_time_drive(int(dest_row1), int(y)))
dist = inst.network._return_estimated_distance_drive(int(dest_row1), int(y))
tt = dist/speed
tuply = (y, tt)
ltrdt.append(tuply)
# sort the tuples
ltrot.sort(key = lambda x: x[1])
ltrdt.sort(key = lambda x: x[1])
# take the average of the first 5
n_neig = 5
avgo = 0
for i in range(min(n_neig, len(ltrot))):
avgo += ltrot[i][1]
if len(ltrot) > 0:
avgo = avgo/min(n_neig, len(ltrot))
avgd = 0
for j in range(min(n_neig, len(ltrdt))):
avgd += ltrdt[j][1]
# add to the running sum
if len(ltrdt) > 0:
avgd = avgd/min(n_neig, len(ltrdt))
#print(avgo, avgd)
#print(avgd)
sumnn += avgo + avgd
omegadarp = sumnn/(len(inst1)*2)
#ttm1['mean'] = ttm1.mean(axis=1)
#varchi = 0.7
#omega = ttm1['mean'].mean()
print(mudarp)
print(omegadarp)
gd = mudarp + omegadarp
print(gd)
else:
ttm_file_inst1 = 'travel_time_matrix_'+filename1
ttmfilename1 = os.fsdecode(ttm_file_inst1)
ttm1 = pd.read_csv(ttm_directory+'/'+ttmfilename1)
ttm1.set_index(['osmid_origin'], inplace=True)
earliest_departure = 'earliest_departure'
latest_arrival = 'latest_arrival'
time_gap = 600
#node_origin =
#node_destination =
osmid_origin = 'originnode_drive'
osmid_destination = 'destinationnode_drive'
speed = 7.22 #26kmh
stops_orgn = 'stops_orgn'
stops_dest = 'stops_dest'
stations = []
ovrsumm = 0
for idx, row in inst1.iterrows():
orgn = row[stops_orgn].strip('][').split(', ')
dest = row[stops_dest].strip('][').split(', ')
orgn = [int(i) for i in orgn]
dest = [int(i) for i in dest]
summ = 0
count = 0
for so in orgn:
for sd in dest:
if so != sd:
summ += ttm1.loc[so, str(sd)]
count += 1
ovrsumm += summ/count
muodbrp = ovrsumm/(len(inst1))
sumnn = 0
for idx1, row1 in inst1.iterrows():
ltro = []
ltrd = []
for idx2, row2 in inst1.iterrows():
if idx2 != idx1:
if (row2[earliest_departure] >= row1[earliest_departure] - time_gap) and (row2[earliest_departure] <= row1[earliest_departure] + time_gap):
stps = row2[stops_orgn].strip('][').split(', ')
ltro.extend(stps)
if (row2[latest_arrival] >= row1[earliest_departure] - time_gap) and (row2[latest_arrival] <= row1[earliest_departure] + time_gap):
stps = row2[stops_dest].strip('][').split(', ')
ltro.extend(stps)
if (row2[latest_arrival] >= row1[latest_arrival] - time_gap) and (row2[latest_arrival] <= row1[latest_arrival] + time_gap):
stps = row2[stops_dest].strip('][').split(', ')
ltrd.extend(stps)
if (row2[earliest_departure] >= row1[latest_arrival] - time_gap) and (row2[earliest_departure] <= row1[latest_arrival] + time_gap):
stps = row2[stops_orgn].strip('][').split(', ')
ltrd.extend(stps)
ltro = list(dict.fromkeys(ltro))
ltrd = list(dict.fromkeys(ltrd))
ltrot = []
ltrdt = []
org_stps = row1[stops_orgn].strip('][').split(', ')
org_stps = [int(i) for i in org_stps]
ltro = [int(i) for i in ltro if int(i) not in org_stps]
for s in org_stps:
for x in ltro:
tuplx = (x, ttm1.loc[int(s), str(x)])
ltrot.append(tuplx)
dest_stps = row1[stops_dest].strip('][').split(', ')
dest_stps = [int(i) for i in dest_stps]
ltrd = [int(i) for i in ltrd if int(i) not in dest_stps]
for s in dest_stps:
for y in ltrd:
tuply = (y, ttm1.loc[int(s), str(y)])
ltrdt.append(tuply)
#sort tuples
ltrot.sort(key = lambda x: x[1])
ltrdt.sort(key = lambda x: x[1])
#avg 5 first
n_neig = 5
avgo = 0
for i in range(min(n_neig, len(ltrot))):
avgo += ltrot[i][1]
if len(ltrot) > 0:
avgo = avgo/min(n_neig, len(ltrot))
avgd = 0
for j in range(min(n_neig, len(ltrdt))):
avgd += ltrdt[j][1]
if len(ltrdt) > 0:
avgd = avgd/min(n_neig, len(ltrdt))
sumnn += avgo + avgd
omegaodbrp = sumnn/(len(inst1)*2)
print(muodbrp)
print(omegaodbrp)
gd = muodbrp + omegaodbrp
print(gd)
def similarity(inst, inst1, inst2):
thtt = 360
thts = 60
the = 60
speed = 7.22 #26kmh
#columns for computation
earliest_departure = 'earliest_departure'
osmid_origin = 'originnode_drive'
osmid_destination = 'destinationnode_drive'
number_reqs = len(inst1)
G = nx.Graph()
for i in range(number_reqs*2):
G.add_node(int(i))
#top_nodes = [i for i in range(number_reqs)]
#bottom_nodes = [i+500 for i in range(number_reqs)]
for id1, req1 in inst1.iterrows():
#o1 = req1['originnode_drive']
#d1 = req1['destinationnode_drive']
#origin_point = (req1['Pickup_Centroid_Latitude'], req1['Pickup_Centroid_Longitude'])
#o1 = ox.get_nearest_node(inst.network.G_drive, origin_point)
#destination_point = (req1['Dropoff_Centroid_Latitude'], req1['Dropoff_Centroid_Longitude'])
#d1 = ox.get_nearest_node(inst.network.G_drive, destination_point)
o1 = req1[osmid_origin]
d1 = req1[osmid_destination]
for id2, req2 in inst2.iterrows():
#o2 = req2['originnode_drive']
#d2 = req2['destinationnode_drive']
#origin_point = (req2['Pickup_Centroid_Latitude'], req2['Pickup_Centroid_Longitude'])
#o2 = ox.get_nearest_node(inst.network.G_drive, origin_point)
#destination_point = (req2['Dropoff_Centroid_Latitude'], req2['Dropoff_Centroid_Longitude'])
#d2 = ox.get_nearest_node(inst.network.G_drive, destination_point)
o2 = req2[osmid_origin]
d2 = req2[osmid_destination]
#oott = inst.network._return_estimated_travel_time_drive(int(o1), int(o2))
#ddtt = inst.network._return_estimated_travel_time_drive(int(d1), int(d2))
#oott2 = inst.network._return_estimated_travel_time_drive(int(o2), int(o1))
#ddtt2 = inst.network._return_estimated_travel_time_drive(int(d2), int(d1))
oott = inst.network._return_estimated_distance_drive(int(o1), int(o2))
ddtt = inst.network._return_estimated_distance_drive(int(d1), int(d2))
oott2 = inst.network._return_estimated_distance_drive(int(o2), int(o1))
ddtt2 = inst.network._return_estimated_distance_drive(int(d2), int(d1))
oott = oott/speed
ddtt = ddtt/speed
oott2 = oott2/speed
ddtt2 = ddtt2/speed
phi = min(oott + ddtt, oott2 + ddtt2)
n1 = int(id1)
n2 = int(id2+number_reqs)
#print(n1, n2)
if phi < thtt:
#print("here")
tau = abs(req1['time_stamp'] - req2['time_stamp'])
eu1 = abs(req1[earliest_departure])
eu2 = abs(req2[earliest_departure])
vartheta = abs(eu1 - eu2)
#print(tau, vartheta)
if (vartheta < the):
G.add_edge(n1, n2, weight=100)
else:
if (tau < thts) or (vartheta < the):
#print("here")
G.add_edge(n1, n2, weight=75)
else:
#print("here")
G.add_edge(n1, n2, weight=50)
else:
G.add_edge(n1, n2, weight=0)
M = nx.max_weight_matching(G, weight='weight', maxcardinality=True)
#M = nx.bipartite.minimum_weight_full_matching(G, weight='weight')
si1i2 = 0
print(len(M))
#print(M)
count = 0
for e in M:
#print(e)
#print(e[0])
#print(e[1])
#print(e)
#print(e)
peso = G.edges[int(e[0]), int(e[1])]['weight']
#if peso > 1:
si1i2 += peso
count += 1
#print(si1i2)
#print(count)
si1i2 = si1i2/count
print(si1i2)
return si1i2
def dynamism(inst1, ed, ld):
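# Sketch of the computation below: theta is the perfectly even gap between
# consecutive request time stamps over the horizon Te; SIGMA accumulates how far
# the observed gaps fall short of theta (with a carry-over term), NEGSIGMA is the
# corresponding upper bound, and the printed rho = 1 - sum(SIGMA)/sum(NEGSIGMA).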
time_stamp = 'time_stamp'
Te = abs(ld - ed)
inst1 = inst1.sort_values(time_stamp)
sorted_ts = inst1[time_stamp].tolist()
#sorted_ts = [i for i in sorted_ts if i != 0]
#exclude time stamp 0
DELTA = []
for ts in range(len(sorted_ts)-1):
DELTA.append(float(abs(sorted_ts[ts+1] - sorted_ts[ts])))
number_reqs = len(inst1)
theta = Te/len(sorted_ts)
SIGMA = []
for k in range(len(DELTA)):
if ((k == 0) and (DELTA[k] < theta)):
SIGMA.append(theta - DELTA[k])
else:
if ((k > 0) and (DELTA[k] < theta)):
SIGMA.append(theta - DELTA[k] + SIGMA[k-1]*((theta - DELTA[k])/theta))
else:
SIGMA.append(0)
#print(SIGMA)
lambdax = 0
for sk in SIGMA:
lambdax += sk
NEGSIGMA = []
for k in range(len(DELTA)):
if ((k > 0) and (DELTA[k] < theta)):
NEGSIGMA.append(theta + SIGMA[k-1]*((theta - DELTA[k])/theta))
else:
NEGSIGMA.append(theta)
#print(NEGSIGMA)
eta = 0
for nsk in NEGSIGMA:
eta += nsk
rho = 1 - (sum(SIGMA)/sum(NEGSIGMA))
print(rho)
def new_heatmap(inst, dfc):
df_og = dfc
df_de = dfc
pts_bhx = []
pts_bhy = []
pts_og = []
pts_ogx = []
pts_ogy = []
for idx, row in df_og.iterrows():
pt = (row['originx'], row['originy'])
pts_og.append(pt)
pts_ogx.append(row['originx'])
pts_ogy.append(row['originy'])
pts_bhx.append(row['originx'])
pts_bhy.append(row['originy'])
pts_de = []
pts_dex = []
pts_dey = []
for idx, row in df_de.iterrows():
pt = (row['destinationx'], row['destinationy'])
pts_de.append(pt)
pts_dex.append(row['destinationx'])
pts_dey.append(row['destinationy'])
pts_bhx.append(row['destinationx'])
pts_bhy.append(row['destinationy'])
minx, miny, maxx, maxy = inst.network.polygon.bounds
#hm = Heatmap(libpath="cHeatmap.cpython-38-x86_64-linux-gnu.so")
#img = hm.heatmap(pts_og, scheme='classic', dotsize=75, opacity=128, area=((minx, miny), (maxx, maxy)))
#img.save("heatmap_og.png")
#hm = Heatmap(libpath="cHeatmap.cpython-38-x86_64-linux-gnu.so")
#img = hm.heatmap(pts_de, scheme='classic', dotsize=75, opacity=128, area=((minx, miny), (maxx, maxy)))
#img.save("heatmap_de.png")
#print(' len points ', len(pts_ogx))
#plt.hist2d(pts_ogx,pts_ogy, bins=[np.arange(minx,maxx,5),np.arange(miny,maxy,5)])
#print(len(pts_ogx))
h = plt.hist2d(pts_ogx,pts_ogy, bins=25, norm=LogNorm(), cmap='jet')
plt.colorbar(h[3])
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.savefig('heatmap_origin_syn.png')
plt.close()
#plt.hist2d(pts_dex,pts_dey, bins=[np.arange(minx,maxx,10),np.arange(miny,maxy,10)])
h = plt.hist2d(pts_dex,pts_dey, bins=25, norm=LogNorm(), cmap='jet')
plt.colorbar(h[3])
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.savefig('heatmap_destination_syn.png')
plt.close()
h = plt.hist2d(pts_bhx,pts_bhy, bins=25, norm=LogNorm(), cmap='jet')
plt.colorbar(h[3])
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.savefig('heatmap_both_syn.png')
plt.close()
#curr_folder = os.getcwd()
#fig, ax = ox.plot_graph(inst.network.G_drive,figsize=(8, 8), dpi=128, show=False, filepath='heatmap_origin_points.png', save=True)
#fig, ax = ox.plot_graph(inst.network.G_drive,figsize=(8, 8), dpi=128, show=False, filepath='heatmap_destination_points.png', save=True)
def new_heatmap_pois(inst, place_name):
output_folder_base = place_name
save_dir_csv = os.path.join(save_dir, 'csv')
path_pois = os.path.join(save_dir_csv, output_folder_base+'.pois.csv')
if os.path.isfile(path_pois):
print('is file POIs')
pois =
|
pd.read_csv(path_pois)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
#
# TODO:
# Load up the dataset, setting correct header labels.
#
# .. your code here ..
df=pd.read_csv('D:/Python_Microscoft/DAT210x/Module2/Datasets/census.data',index_col=0, header=None)
df.head(5)
#
# TODO:
# Use basic pandas commands to look through the dataset... get a
# feel for it before proceeding! Do the data-types of each column
# reflect the values you see when you look through the data using
# a text editor / spread sheet program? If you see 'object' where
# you expect to see 'int32' / 'float64', that is a good indicator
# that there is probably a string or missing value in a column.
# use `your_data_frame['your_column'].unique()` to see the unique
# values of each column and identify the rogue values. If these
# should be represented as nans, you can convert them using
# na_values when loading the dataframe.
#
# .. your code here ..
df.columns=['education', 'age', 'capital-gain', 'race', 'capital-loss', 'hours-per-week', 'sex', 'classification']
df['capital-gain'].unique()
df2=df
df2['capital-gain']=pd.to_numeric(df2['capital-gain'], errors='coerce')
#
# TODO:
# Look through your data and identify any potential categorical
# features. Ensure you properly encode any ordinal and nominal
# types using the methods discussed in the chapter.
#
# Be careful! Some features can be represented as either categorical
# or continuous (numerical). If you ever get confused, think to yourself
# what makes more sense generally---to represent such features with a
# continuous numeric type... or a series of categories?
#
# .. your code here ..
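# Illustrative sketch of ordinal encoding (hypothetical example values, not the
# actual census categories): ordered categories become integer codes 0..n-1.
_edu_order = pd.CategoricalDtype(categories=['HS-grad', 'Bachelors', 'Masters', 'Doctorate'], ordered=True)
_edu_codes = pd.Series(['HS-grad', 'Bachelors', 'Masters']).astype(_edu_order).cat.codes
print(_edu_codes.tolist())  # [0, 1, 2]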
df3=
|
pd.get_dummies(df2['classification'])
|
pandas.get_dummies
|
import matplotlib.pyplot as plt
import requests
import json
import pandas as pd
import numpy as np
# Moscow administrative districts (okrugs)
COLORS = ['y', 'b', 'r', 'g', 'c', 'm', 'lime', 'gold', 'orange', 'coral', 'purple', 'grey']
DISTRICT = {"Восточный административный округ": [55.787710, 37.775631],
"Западный административный округ": [55.728003, 37.443533],
"Зеленоградский административный округ": [55.987583, 37.194250],
"Новомосковский административный округ": [55.558121, 37.370724],
"Северный административный округ": [55.838384, 37.525765],
"Северо-Восточный административный округ": [55.863894, 37.620923],
"Северо-Западный административный округ": [55.829370, 37.451546],
"Троицкий административный округ": [55.355771, 37.146990],
"Центральный административный округ": [55.753995, 37.614069],
"Юго-Восточный административный округ": [55.692019, 37.754583],
"Юго-Западный административный округ": [55.662735, 37.576178],
"Южный административный округ": [55.610906, 37.681479]}
# District name abbreviations
DISTRICT_NAME = ['ВАО', 'ЗАО', 'ЗелАО', 'Новомосковский АО', 'САО', 'СВАО', 'СЗАО', 'Троицкий АО', 'ЦАО', 'ЮВАО', 'ЮЗАО', 'ЮАО']
# POST request for the data
def get_data(url, filename):
URL = url
client = requests.session()
client.get(URL)
res = requests.post(URL, headers=dict(Referer=URL))
with open(filename, 'w') as outfile:
json.dump(res.json(), outfile, ensure_ascii=False, separators=(',', ': '), indent=4, sort_keys=False)
class network_KH:
def __init__(self, values, centers):
self.values = np.array(values)
self.centers = np.array(centers)
self.weights = np.zeros((len(values), len(centers)))
def euclidDist(self, a, b):
return np.linalg.norm(a - b)
def find_weights(self):
for value_i in range(len(self.values)):
for center_i in range(len(self.centers)):
self.weights[value_i][center_i] = self.euclidDist(self.values[value_i], self.centers[center_i])
for value_i in range(len(self.values)):
min_index = self.weights[value_i].argmin()
self.weights[value_i][min_index] = 1
self.weights[value_i][0:min_index] = 0
self.weights[value_i][min_index + 1:] = 0
return self.weights
class ClusterAnalysis():
def __init__(self, data_lyceums):
self.read_json(data_lyceums)
_, self.ax = plt.subplots()
self.save_data('init.png')
# Read the JSON file
def read_json(self, data_lyceums):
json_data = open(data_lyceums).read()
data = json.loads(json_data)
lyceums_data = [data['features'][i]['geometry']['coordinates'] for i in
range(len(data['features']))]
dist_data = [data['features'][i]['properties']['Attributes']['okrug'] for i in
range(len(data['features']))]
name_data = [data['features'][i]['properties']['Attributes']['name'] for i in
range(len(data['features']))]
lyceums =
|
pd.DataFrame(lyceums_data, columns=['x', 'y'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 12:01:40 2018
@author: suvod
"""
from main.git_log import git2repo
import pygit2
import re
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from datetime import datetime
import re, unicodedata
import nltk.corpus
from nltk.stem import PorterStemmer,SnowballStemmer
from nltk.stem import WordNetLemmatizer
from pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE
from main.utils import utils
import threading
from multiprocessing import Queue
from threading import Thread
import math
import os
from multiprocessing import Pool, cpu_count
import platform
from os.path import dirname as up
import sys
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
#print(type(self._target))
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
class create_code_interaction_graph(object):
def __init__(self,repo_url,repo_name):
self.repo_url = repo_url
self.repo_name = repo_name
self.repo_obj = git2repo.git2repo(self.repo_url,self.repo_name)
self.repo = self.repo_obj.clone_repo()
if platform.system() == 'Darwin' or platform.system() == 'Linux':
self.repo_path = up(os.getcwd()) + '/temp_repo/' + repo_name
self.file_path = up(os.getcwd()) + '/data/' + repo_name + '_commit.pkl'
else:
self.repo_path = up(os.getcwd()) + '\\temp_repo\\' + repo_name
self.file_path = up(os.getcwd()) + '\\data\\' + repo_name + '_commit.pkl'
#self.commits = self.repo_obj.get_commit_objects()
self.commits = self.read_commits()
self.commit_df = pd.DataFrame(self.commits, columns = ['commit_object'])
#self.committed_files = self.repo_obj.get_committed_files()
self.diffs = self.get_diffs()
self.cores = cpu_count()
self.blame_count = 0
def read_commits(self):
df = pd.read_pickle(self.file_path)
df_commit_id = df.commit_number.values.tolist()
commits = []
for commit in df_commit_id:
obj = self.repo.get(commit)
if obj == None:
continue
commits.append(obj)
return commits
def get_diffs(self):
commit_ids = []
for i in self.commits:
    commit_ids.append(i.id)
diffs = self.repo_obj.get_diffs(commit_ids)
return diffs
def get_bug_creators(self,diffs):
bug_creator = []
for value in diffs:
_diff_files = diffs[value]['files']
self.repo.head.set_target(diffs[value]['object'].parent_ids[0])
for _value in _diff_files:
try:
file_path = _diff_files[_value]['file_path']
#print("Get blame start in get_bug_creators")
self.blame_count += 1
blame = self.repo_obj.get_blame(file_path)
#print("Get blame end in get_bug_creators")
for _line in _diff_files[_value]['old_lines']:
if _line != -1:
ref = blame.for_line(_line)
bug_creator.append([ref.final_committer.name,diffs[value]['object'].committer.name ,ref.orig_commit_id, 1])
except Exception as e:
#print(file_path,e)
continue
bug_creator_df = pd.DataFrame(bug_creator, columns = ['committer1','committer2','commit','ob'])
return bug_creator_df
def create_adjacency_matrix(self):
threads = []
bug_creator_df = pd.DataFrame([], columns = ['committer1','committer2','commit','ob'])
#i = 0
keys = list(self.diffs.keys())
len_bd = len(self.diffs)
sub_list_len = len_bd/self.cores
keys_set = set(self.diffs.keys())
for i in range(self.cores):
sub_keys = keys[int(i*sub_list_len):int((i+1)*sub_list_len)]
subdict = {x: self.diffs[x] for x in sub_keys if x in keys_set}
t = ThreadWithReturnValue(target = self.get_bug_creators, args = [subdict])
threads.append(t)
for i in range(0,len(threads),self.cores):
print("Starting Thread group:",i)
_threads = threads[i:i+self.cores]
for th in _threads:
th.start()
for th in _threads:
response = th.join()
bug_creator_df = pd.concat([bug_creator_df, response])  # api: pandas.concat
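# Note on the accumulation above: calling pd.concat inside the join loop grows
# quadratically with the number of thread results. A common alternative (a
# sketch, assuming the same ThreadWithReturnValue results) collects the frames
# first and concatenates once:
#   frames = [th.join() for th in _threads]
#   bug_creator_df = pd.concat([bug_creator_df] + frames, ignore_index=True)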
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import types
from pandas import compat
from pandas.compat import u
from pandas.core.algorithms import factorize
from pandas.core.base import PandasObject, PandasDelegate
from pandas.core.index import Index, _ensure_index
from pandas.core.indexing import _is_null_slice
from pandas.tseries.period import PeriodIndex
import pandas.core.common as com
from pandas.core.common import isnull
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option
from pandas.core import format as fmt
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and Categoricals can be
# seen as a custom type, but having different results depending on whether the levels are
# the same or not is kind of insane, so be a bit stricter here and use the python3 idea
# of comparing only things of equal type.
if not self.ordered:
if op in ['__lt__', '__gt__','__le__','__ge__']:
raise TypeError("Unordered Categoricals can only compare equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the levels are the same
if (len(self.levels) != len(other.levels)) or not ((self.levels == other.levels).all()):
raise TypeError("Categoricals can only be compared if 'levels' are the same")
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if 'ordered' is the same")
na_mask = (self._codes == -1) | (other._codes == -1)
f = getattr(self._codes, op)
ret = f(other._codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
elif np.isscalar(other):
if other in self.levels:
i = self.levels.get_loc(other)
return getattr(self._codes, op)(i)
else:
return np.repeat(False, len(self))
else:
msg = "Cannot compare a Categorical for op {op} with type {typ}. If you want to \n" \
"compare values, use 'np.asarray(cat) <op> other'."
raise TypeError(msg.format(op=op,typ=type(other)))
f.__name__ = op
return f
def _is_categorical(array):
""" return if we are a categorical possibility """
return isinstance(array, Categorical) or isinstance(array.dtype, com.CategoricalDtype)
def _maybe_to_categorical(array):
""" coerce to a categorical if a series is given """
if isinstance(array, com.ABCSeries):
return array.values
return array
_codes_doc = """The level codes of this categorical.
Level codes are an array of integers which give the positions of the real
values in the levels array.
There is no setter; use the other categorical methods and the item setter on
Categorical to change values in the categorical.
"""
_levels_doc = """The levels of this categorical.
Setting assigns new values to each level (effectively a rename of
each individual level).
The assigned value has to be a list-like object. If the number of
level-items is less than number of level-items in the current level,
all level-items at a higher position are set to NaN. If the number of
level-items is more than the current number of level-items, new
(unused) levels are added at the end.
To add level-items in between, use `reorder_levels`.
Raises
------
ValueError
If the new levels do not validate as levels
See also
--------
Categorical.reorder_levels
Categorical.remove_unused_levels
"""
class Categorical(PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`levels`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `levels` or `np.nan`.
Assigning values outside of `levels` will raise a `ValueError`. Order is
defined by the order of the `levels`, not lexical order of the values.
Parameters
----------
values : list-like
The values of the categorical. If levels are given, values not in levels will
be replaced with NaN.
levels : Index-like (unique), optional
The unique levels for this categorical. If not given, the levels are assumed
to be the unique values of values.
ordered : boolean, optional
Whether or not this categorical is treated as an ordered categorical. If not given,
the resulting categorical will be ordered if values can be sorted.
name : str, optional
Name for the Categorical variable. If name is None, will attempt
to infer from values.
compat : boolean, default=False
Whether to treat values as codes to the levels (old API, deprecated)
Attributes
----------
levels : Index
The levels of this categorical
codes : ndarray
The codes (integer positions, which point to the levels) of this categorical, read only
ordered : boolean
Whether or not this Categorical is ordered
name : string
The name of this Categorical
Raises
------
ValueError
If the levels do not validate
TypeError
If an explicit ``ordered=True`` is given but no `levels` and the `values` are not sortable
Examples
--------
>>> from pandas import Categorical
>>> Categorical([1, 2, 3, 1, 2, 3])
1
2
3
1
2
3
Levels (3): Int64Index([1, 2, 3], dtype=int64), ordered
>>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
a
b
c
a
b
c
Levels (3): Index(['a', 'b', 'c'], dtype=object), ordered
>>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'])
>>> a.min()
'c'
"""
ndim = 1
"""Number of dimensions (always 1!)"""
dtype = com.CategoricalDtype()
"""The dtype (always "category")"""
ordered = None
"""Whether or not this Categorical is ordered.
Only ordered `Categoricals` can be sorted (according to the order
of the levels) and have a min and max value.
See also
--------
Categorical.sort
Categorical.order
Categorical.min
Categorical.max
"""
# For comparisons, so that numpy uses our implementation of the compare ops, which raise
__array_priority__ = 1000
def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False, compat=False):
if fastpath:
# fast path
self._codes = values
self.name = name
self.levels = levels
self.ordered = ordered
return
if name is None:
name = getattr(values, 'name', None)
# sanitize input
if com.is_categorical_dtype(values):
# we are either a Series or a Categorical
cat = values
if isinstance(values, com.ABCSeries):
cat = values.values
if levels is None:
levels = cat.levels
if ordered is None:
ordered = cat.ordered
values = values.__array__()
elif isinstance(values, Index):
pass
else:
# on numpy < 1.6 datetimelike get inferred to all i8 by _sanitize_array
# which is fine, but since factorize does this correctly no need here
# this is an issue because _sanitize_array also coerces np.nan to a string
# under certain versions of numpy as well
values = com._possibly_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# On list with NaNs, int values will be converted to float. Use "object" dtype
# to prevent this. In the end objects will be casted to int/... in the level
# assignment step.
dtype = 'object' if isnull(values).any() else None
values = _sanitize_array(values, None, dtype=dtype)
if levels is None:
try:
codes, levels = factorize(values, sort=True)
# If the underlying data structure was sortable, and the user doesn't want to
# "forget" this order, the categorical also is sorted/ordered
if ordered is None:
ordered = True
except TypeError:
codes, levels = factorize(values, sort=False)
if ordered:
# raise, as we don't have a sortable data structure and so the user should
# give us one by specifying levels
raise TypeError("'values' is not ordered, please explicitly specify the level "
"order by passing in a level argument.")
else:
# there are two ways if levels are present
# the old one, where each value is an int pointer to the levels array
# the new one, where each value is also in the level array (or np.nan)
# make sure that we always have the same type here, no matter what we get passed in
levels = self._validate_levels(levels)
# There can be two ways: the old which passed in codes and levels directly
# and values have to be inferred and the new one, which passes in values and levels
# and _codes have to be inferred.
# min and max can be higher and lower if not all levels are in the values
if compat and (com.is_integer_dtype(values) and
(np.min(values) >= -1) and (np.max(values) < len(levels))):
warn("Using 'values' as codes is deprecated.\n"
"'Categorical(... , compat=True)' is only there for historical reasons and "
"should not be used in new code!\n"
"See https://github.com/pydata/pandas/pull/7217", FutureWarning)
codes = values
else:
codes = _get_codes_for_values(values, levels)
# if we got levels, we can assume that the order is intended
# if ordered is unspecified
if ordered is None:
ordered = True
self.ordered = False if ordered is None else ordered
self._codes = codes
self.levels = levels
self.name = name
def copy(self):
""" Copy constructor. """
return Categorical(values=self._codes.copy(),levels=self.levels,
name=self.name, ordered=self.ordered, fastpath=True)
@classmethod
def from_array(cls, data):
"""
Make a Categorical type from a single array-like object.
Parameters
----------
data : array-like
Can be an Index or array-like. The levels are assumed to be
the unique values of `data`.
"""
return Categorical(data)
@classmethod
def from_codes(cls, codes, levels, ordered=False, name=None):
"""
Make a Categorical type from codes and levels arrays.
This constructor is useful if you already have codes and levels and so do not need the
(computation intensive) factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a level in levels or -1 for NaN
levels : index-like
The levels for the categorical. Items need to be unique.
ordered : boolean, optional
Whether or not this categorical is treated as an ordered categorical. If not given,
the resulting categorical will be unordered.
name : str, optional
Name for the Categorical variable.
"""
try:
codes = np.asarray(codes, np.int64)
except:
raise ValueError("codes need to be convertible to an arrays of integers")
levels = cls._validate_levels(levels)
if codes.max() >= len(levels) or codes.min() < -1:
raise ValueError("codes need to be between -1 and len(levels)-1")
return Categorical(codes, levels=levels, ordered=ordered, name=name, fastpath=True)
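# Hypothetical usage of the constructor documented above (illustration only,
# following the levels-based signature shown here):
#   Categorical.from_codes([0, 1, 2, 0, -1], levels=['a', 'b', 'c'])
#   # -> values ['a', 'b', 'c', 'a', NaN] with levels Index(['a', 'b', 'c'])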
_codes = None
def _get_codes(self):
""" Get the level codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _get_labels(self):
""" Get the level labels (deprecated).
Deprecated, use .codes!
"""
import warnings
warnings.warn("'labels' is deprecated. Use 'codes' instead", FutureWarning)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
_levels = None
@classmethod
def _validate_levels(cls, levels):
"""" Validates that we have good levels """
if not isinstance(levels, Index):
dtype = None
if not hasattr(levels, "dtype"):
levels = _convert_to_list_like(levels)
# on levels with NaNs, int values would be converted to float. Use "object" dtype
# to prevent this.
if isnull(levels).any():
without_na = np.array([x for x in levels if com.notnull(x)])
with_na = np.array(levels)
if with_na.dtype != without_na.dtype:
dtype = "object"
levels = Index(levels, dtype=dtype)
if not levels.is_unique:
raise ValueError('Categorical levels must be unique')
return levels
def _set_levels(self, levels):
""" Sets new levels """
levels = self._validate_levels(levels)
if not self._levels is None and len(levels) < len(self._levels):
# remove all _codes which are larger
self._codes[self._codes >= len(levels)] = -1
self._levels = levels
def _get_levels(self):
""" Gets the levels """
# levels is an Index, which is immutable -> no need to copy
return self._levels
levels = property(fget=_get_levels, fset=_set_levels, doc=_levels_doc)
def reorder_levels(self, new_levels, ordered=None):
""" Reorders levels as specified in new_levels.
`new_levels` must include all old levels but can also include new level items. In
contrast to assigning to `levels`, these new level items can be in arbitrary positions.
The level reordering is done inplace.
Raises
------
ValueError
If the new levels do not contain all old level items
Parameters
----------
new_levels : Index-like
The levels in the new order. Must include all old level items and may also contain new items.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical. If not given,
do not change the ordered information.
"""
new_levels = self._validate_levels(new_levels)
if len(new_levels) < len(self._levels) or len(self._levels.difference(new_levels)):
raise ValueError('Reordered levels must include all original levels')
values = self.__array__()
self._codes = _get_codes_for_values(values, new_levels)
self._levels = new_levels
if not ordered is None:
self.ordered = ordered
def remove_unused_levels(self):
""" Removes levels which are not used.
The level removal is done inplace.
"""
_used = sorted(np.unique(self._codes))
new_levels = self.levels.take(com._ensure_platform_int(_used))
new_levels = _ensure_index(new_levels)
self._codes = _get_codes_for_values(self.__array__(), new_levels)
self._levels = new_levels
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def __array__(self, dtype=None):
""" The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or, if dtype==None (default), the same
dtype as categorical.levels.dtype
"""
ret = com.take_1d(self.levels.values, self._codes)
if dtype and dtype != self.levels.dtype:
return np.asarray(ret, dtype)
return ret
@property
def T(self):
return self
def isnull(self):
"""
Detect missing values
Both missing values (-1 in .codes) and NA as a level are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
pandas.isnull : pandas version
Categorical.notnull : boolean inverse of Categorical.isnull
"""
ret = self._codes == -1
# String/object and float levels can hold np.nan
if self.levels.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.levels:
nan_pos = np.where(isnull(self.levels))[0]
# we only have one NA in levels
ret = np.logical_or(ret , self._codes == nan_pos)
return ret
def notnull(self):
"""
Reverse of isnull
Both missing values (-1 in .codes) and NA as a level are detected as null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
pandas.notnull : pandas version
Categorical.isnull : boolean inverse of Categorical.notnull
"""
return ~self.isnull()
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.levels.dtype or dtype string if periods
"""
# if we are a period index, return a string repr
if isinstance(self.levels, PeriodIndex):
return com.take_1d(np.array(self.levels.to_native_types(), dtype=object),
self._codes)
return np.array(self)
def argsort(self, ascending=True, **kwargs):
""" Implements ndarray.argsort.
For internal compatibility with numpy arrays.
Only ordered Categoricals can be argsorted!
Returns
-------
argsorted : numpy array
"""
if not self.ordered:
raise TypeError("Categorical not ordered")
result = np.argsort(self._codes.copy(), **kwargs)
if not ascending:
result = result[::-1]
return result
def order(self, inplace=False, ascending=True, na_position='last', **kwargs):
""" Sorts the Category by level value returning a new Categorical by default.
Only ordered Categoricals can be sorted!
Categorical.sort is the equivalent but sorts the Categorical inplace.
Parameters
----------
ascending : boolean, default True
Sort ascending. Passing False sorts descending
inplace : boolean, default False
Do operation in place.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Category or None
See Also
--------
Category.sort
"""
if not self.ordered:
raise TypeError("Categorical not ordered")
if na_position not in ['last','first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes==-1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position=="first" and not ascending:
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position=="last" and not ascending:
# ... and to the end
new_codes = codes.copy()
pos = len(codes)-n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return Categorical(values=codes,levels=self.levels, ordered=self.ordered,
name=self.name, fastpath=True)
def sort(self, inplace=True, ascending=True, na_position='last', **kwargs):
""" Sorts the Category inplace by level value.
Only ordered Categoricals can be sorted!
Categorical.order is the equivalent but returns a new Categorical.
Parameters
----------
ascending : boolean, default True
Sort ascending. Passing False sorts descending
inplace : boolean, default False
Do operation in place.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Category or None
See Also
--------
Category.order
"""
return self.order(inplace=inplace, ascending=ascending, **kwargs)
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
""" Return my 'dense' repr """
return np.asarray(self)
def fillna(self, fill_value=None, method=None, limit=None, **kwargs):
""" Fill NA/NaN values using the specified method.
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar
Value to use to fill holes (e.g. 0)
limit : int, default None
Maximum size gap to forward or backward fill (not implemented yet!)
Returns
-------
filled : Categorical with NA/NaN filled
"""
if fill_value is None:
fill_value = np.nan
if limit is not None:
raise NotImplementedError
values = self._codes
# Make sure that we also get NA in levels
if self.levels.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.levels:
values = values.copy()
nan_pos = np.where(isnull(self.levels))[0]
# we only have one NA in levels
values[values == nan_pos] = -1
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1,len(self))
values = com.interpolate_2d(
values, method, 0, None, fill_value).astype(self.levels.dtype)[0]
values = _get_codes_for_values(values, self.levels)
else:
if not com.isnull(fill_value) and fill_value not in self.levels:
raise ValueError("fill value must be in levels")
mask = values==-1
if mask.any():
values = values.copy()
values[mask] = self.levels.get_loc(fill_value)
return Categorical(values, levels=self.levels, ordered=self.ordered,
name=self.name, fastpath=True)
def take_nd(self, indexer, allow_fill=True, fill_value=None):
""" Take the codes by the indexer, fill with the fill_value. """
# filling must always be None/nan here
# but is passed thru internally
assert isnull(fill_value)
codes = com.take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
result = Categorical(codes, levels=self.levels, ordered=self.ordered,
name=self.name, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself. """
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not _is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
slicer = slicer[1]
_codes = self._codes[slicer]
return Categorical(values=_codes,levels=self.levels, ordered=self.ordered,
name=self.name, fastpath=True)
def __len__(self):
return len(self._codes)
def __iter__(self):
return iter(np.array(self))
def _tidy_repr(self, max_vals=20):
num = max_vals // 2
head = self[:num]._get_repr(length=False, name=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False,
name=False,
footer=False)
result = '%s\n...\n%s' % (head, tail)
result = '%s\n%s' % (result, self._repr_footer())
return compat.text_type(result)
def _repr_level_info(self):
""" Returns a string representation of the footer."""
max_levels = (10 if get_option("display.max_levels") == 0
else get_option("display.max_levels"))
level_strs = fmt.format_array(self.levels.get_values(), None)
if len(level_strs) > max_levels:
num = max_levels // 2
head = level_strs[:num]
tail = level_strs[-(max_levels - num):]
level_strs = head + ["..."] + tail
# Strip all leading spaces, which format_array adds for columns...
level_strs = [x.strip() for x in level_strs]
levheader = "Levels (%d, %s): " % (len(self.levels),
self.levels.dtype)
width, height = get_terminal_size()
max_width = (width if get_option("display.width") == 0
             else get_option("display.width"))  # api: pandas.core.config.get_option
import pickle
import random
import pandas as pd
from imblearn.over_sampling import SMOTE
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from common.env import get_scaler_file
def generate_frauds(trainInputs, trainOutputs):
sm = SMOTE(random_state=2)
trainInputs, trainOutputs = sm.fit_resample(trainInputs, trainOutputs)
return trainInputs, trainOutputs
def normalization(data):
scaler = StandardScaler()
if not isinstance(data[0], list):
xData = [[d] for d in data]
scaler.fit(xData)
normalisedData = scaler.transform(xData)
xNormalisedData = [el[0] for el in normalisedData]
else:
scaler.fit(data)
xNormalisedData = scaler.transform(data)
# save scaler information to pickle file
scaler_file = get_scaler_file()
with open(scaler_file, 'wb') as handle:
pickle.dump(scaler, handle, protocol=pickle.HIGHEST_PROTOCOL)
return xNormalisedData
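# Hedged example (not in the original module): the scaler pickled above can be
# reloaded later for inference, e.g.
#   with open(get_scaler_file(), 'rb') as handle:
#       scaler = pickle.load(handle)
#   normalised = scaler.transform(new_rows)  # new_rows shaped like the fitted data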
def split_data(inputs, outputs):
random.seed(5)
indexes = [i for i in range(len(inputs))]
trainSample = random.choices(indexes, k=int(0.5 * len(inputs)))
testSample = [i for i in indexes if not i in trainSample]
trainInputs = [inputs[i] for i in trainSample]
trainOutputs = [outputs[i] for i in trainSample]
testInputs = [inputs[i] for i in testSample]
testOutputs = [outputs[i] for i in testSample]
return trainInputs, trainOutputs, testInputs, testOutputs
def balance_histogram(outputs):
    pd.value_counts(outputs)  # api: pandas.value_counts
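# The row above is cut off after the value_counts call; a plausible continuation
# (an assumption, not the author's code) would plot the class balance:
#   counts = pd.value_counts(outputs)
#   counts.plot(kind='bar')
#   plt.show()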
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME>
#
# Merge top loci table
#
import sys
import argparse
import pandas as pd
from collections import OrderedDict
from parquet_writer import write_parquet
import datetime
def main():
# Parse args
args = parse_args()
# Load
gwas = pd.read_json(args.in_gwascat, orient='records', lines=True)
ukb = pd.read_json(args.in_ukb, orient='records', lines=True)  # api: pandas.read_json
"""
ET Dataset from Informer Paper.
Dataset: https://github.com/zhouhaoyi/ETDataset
Dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from src.dataloaders.datasets import SequenceDataset, default_data_path
class TimeFeature:
def __init__(self):
pass
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
pass
def __repr__(self):
return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
"""Day of month encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
"""Day of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
"""Month of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
"""Week of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.isocalendar().week - 1) / 52.0 - 0.5
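# Quick illustration (not part of the original module): each TimeFeature maps a
# DatetimeIndex to floats in [-0.5, 0.5], e.g.
#   idx = pd.date_range("2020-01-01", periods=4, freq="6H")
#   HourOfDay()(idx)  # four floats between -0.5 and 0.5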
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
"""
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
offset = to_offset(freq_str)  # api: pandas.tseries.frequencies.to_offset
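# The row above is truncated after the to_offset call; implementations of this
# helper usually finish the lookup roughly as follows (assumed continuation,
# not verbatim from this file):
#   for offset_type, feature_classes in features_by_offsets.items():
#       if isinstance(offset, offset_type):
#           return [cls() for cls in feature_classes]
#   raise RuntimeError(f"Unsupported frequency {freq_str}")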
import os
import common
from pytz import timezone
from datetime import datetime
import pandas as pd
import hashlib
import yaml
from copy import deepcopy
common_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(common_dir, 'config.yml')
types_map = {'categorical': str,
'float': float,
'bool': bool,
'datetime': datetime}
def validate_headers(df:pd.DataFrame, expected_headers:set):
"""
:param df:
:param expected_headers:
:return: None
"""
headers = set(df.columns.tolist())
missing_headers = set(expected_headers) - headers
additional_headers = headers - set(expected_headers) # tell user any additional fields which could cause issues.
if len(additional_headers):
print(f'Found additional headers in supplied file: {additional_headers}. This may potentially cause issues.')
if len(missing_headers) != 0:
raise ValueError(f'Dataframe is missing expected headers: {missing_headers}')
fields_with_missing_vals = []
for header in expected_headers:
missing_vals = df[header].isnull().values.sum()
if missing_vals > 0:
fields_with_missing_vals.append(header)
if len(fields_with_missing_vals):
print(f'Fields {fields_with_missing_vals} are missing values. This may potentially cause issues.')
def make_prediction_df(qradar_df:pd.DataFrame, desired_subnet:str=None):
"""
:param qradar_df: DataFrame
:param desired_subnet: str
:return: my_perspective_network_df: DataFrame
"""
# validate_headers(qradar_df, set(FILE_CONFIG.biflow_fields))
if desired_subnet is None:
raise ValueError('Please provide a "desired_subnet" parameter (ex. 19.43)')
# re-work source only dataframe
src_subnet_field = FILE_CONFIG.biflow_src_prfx + FILE_CONFIG.hierarchy[0]
df_source = qradar_df[qradar_df[src_subnet_field] == desired_subnet]
df_source.columns = df_source.columns.str.replace(FILE_CONFIG.biflow_src_prfx, FILE_CONFIG.uniflow_this_prfx)
df_source.columns = df_source.columns.str.replace(FILE_CONFIG.biflow_dst_prfx, FILE_CONFIG.uniflow_that_prfx)
df_source = df_source.assign(**{FILE_CONFIG.uniflow_indicator: True})
dst_subnet_field = FILE_CONFIG.biflow_dst_prfx + FILE_CONFIG.hierarchy[0]
df_dest = qradar_df[qradar_df[dst_subnet_field] == desired_subnet]
df_dest.columns = df_dest.columns.str.replace(FILE_CONFIG.biflow_src_prfx, FILE_CONFIG.uniflow_that_prfx)
df_dest.columns = df_dest.columns.str.replace(FILE_CONFIG.biflow_dst_prfx, FILE_CONFIG.uniflow_this_prfx)
df_dest = df_dest.assign(**{FILE_CONFIG.uniflow_indicator: False})
my_perspective_network_df = pd.concat([df_source, df_dest], sort=True).sort_index()
# my_perspective_network_df['mytotheirbytesratio'] = my_perspective_network_df['mybytes'] / (my_perspective_network_df['mybytes'] + my_perspective_network_df['theirbytes'])
# my_perspective_network_df['myserverfromford'] = my_perspective_network_df['mysubnet'].str.startswith('19.')
# my_perspective_network_df['theirserverfromford'] = my_perspective_network_df['theirsubnet'].str.startswith('19.')
# validate_headers(my_perspective_network_df, FILE_CONFIG.uniflow_fields.keys())
return my_perspective_network_df
def md5(fname):
"""
:param fname:
:return:
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def get_file_metadata(filepath, desired_subnet=None, priors_dir=None):
now = datetime.now()
qradar_df = pd.read_csv(filepath, dtype=FILE_CONFIG.biflow_fields)  # api: pandas.read_csv
import sys
import os
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
''' Function to load in messages and categories data,
join them and return the result. This function also
removes duplicates before merging datasets
Args:
messages_filepath (string): path to the messages csv file.
categories_filepath (string): path to the categories csv file
Returns:
df (DataFrame): merged dataset containing messages and categories columns
'''
# read in messages data
messages_df = pd.read_csv(messages_filepath)
# remove duplicates
messages_df.drop_duplicates(inplace=True)
# read in categories data
categories_df = pd.read_csv(categories_filepath)  # api: pandas.read_csv
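# The row is cut off after reading the categories file; per the docstring above,
# a plausible continuation (an assumption, not the author's code) deduplicates
# and merges the two frames on their shared key:
#   categories_df.drop_duplicates(inplace=True)
#   df = messages_df.merge(categories_df, on='id')  # 'id' is an assumed key
#   return df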
import dataclasses
from collections import Counter
from itertools import compress
import numpy as np
import pandas as pd
import pytest
from ..tree import (
Node,
PeriodicPatternMiner,
Tree,
combine_horizontally,
combine_vertically,
encode_leaves,
get_occs,
greedy_cover,
grow_horizontally,
)
@pytest.mark.parametrize("tau", [0, 330])
def test_create_tree_3_wakeup_breakfast(tau):
"""
during 3 weeks
on every business day
wake up at 7:00 AM
breakfast at 7:10 AM
periods and distances expressed in minutes
"""
week_node = Node(
r=5, p=1440, children=["wake up", "breakfast"], children_dists=[10]
)
tree = Tree(tau, r=3, p=1440 * 7, children=[week_node])
assert dataclasses.is_dataclass(tree)
instances = tree.to_list()
assert len(instances) == tree._n_occs == 30 # 2 events per day, 5 days for 3 weeks
assert instances[0][0] == tau  # first occurrence at tau
assert tree._size == len(tree) == 4
assert {"r", "p"}.issubset(tree.to_dict().keys())
def test_prefit():
logs = pd.Series(["wake up", "breakfast"] * 10)  # api: pandas.Series
import numpy as np;
import pandas as pd;
import os
raw_data_path = os.path.join(os.path.pardir,'data','raw')
train_file_path = os.path.join(raw_data_path,'train.csv')
test_file_path = os.path.join(raw_data_path,'test.csv')
#read data as dataframe
train_df = pd.read_csv(train_file_path, index_col='PassengerId')  # api: pandas.read_csv
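# A natural next step (an assumption mirroring the line above, not part of the
# original row) would load the test split the same way:
#   test_df = pd.read_csv(test_file_path, index_col='PassengerId')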
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
def test_check_index_errors(sample_df):
error_message = "Specified index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_index(dataframe=sample_df, index="foo")
if isinstance(sample_df, pd.DataFrame):
# Does not check for index uniqueness with Dask
error_message = "Index column must be unique"
with pytest.raises(LookupError, match=error_message):
_check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
error_message = "logical_types must be a dictionary"
with pytest.raises(TypeError, match=error_message):
_check_logical_types(sample_df, logical_types="type")
bad_logical_types_keys = {
"full_name": None,
"age": None,
"birthday": None,
"occupation": None,
}
error_message = re.escape(
"logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
)
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_logical_types(sample_df, bad_logical_types_keys)
def test_check_time_index_errors(sample_df):
error_message = "Specified time index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
assert sample_df.ww.schema is None
sample_df.ww.init()
assert isinstance(sample_df.ww.schema, TableSchema)
def test_accessor_schema_property(sample_df):
sample_df.ww.init()
assert sample_df.ww._schema is not sample_df.ww.schema
assert sample_df.ww._schema == sample_df.ww.schema
def test_set_accessor_name(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name = "name"
df.ww.init()
assert df.ww.name is None
df.ww.name = "name"
assert df.ww.schema.name == "name"
assert df.ww.name == "name"
def test_rename_init_with_name(sample_df):
df = sample_df.copy()
df.ww.init(name="name")
assert df.ww.name == "name"
df.ww.name = "new_name"
assert df.ww.schema.name == "new_name"
assert df.ww.name == "new_name"
def test_name_error_on_init(sample_df):
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(name=123)
def test_name_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.name = 123
def test_name_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.name = "name"
assert df.ww.name == "name"
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.name == "name"
assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata = {"new": "metadata"}
df.ww.init()
assert df.ww.metadata == {}
df.ww.metadata = {"new": "metadata"}
assert df.ww.schema.metadata == {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
def test_set_metadata_after_init_with_metadata(sample_df):
df = sample_df.copy()
df.ww.init(table_metadata={"new": "metadata"})
assert df.ww.metadata == {"new": "metadata"}
df.ww.metadata = {"new": "new_metadata"}
assert df.ww.schema.metadata == {"new": "new_metadata"}
assert df.ww.metadata == {"new": "new_metadata"}
def test_metadata_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.metadata = {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.metadata == {"new": "metadata"}
assert dropped_df.ww.schema.metadata == {"new": "metadata"}
def test_metadata_error_on_init(sample_df):
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(table_metadata=123)
def test_metadata_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
sample_df.ww.init(logical_types={"age": "Categorical"})
assert isinstance(sample_df.ww.physical_types, dict)
assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
for k, v in sample_df.ww.physical_types.items():
logical_type = sample_df.ww.columns[k].logical_type
if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
assert v == logical_type.backup_dtype
else:
assert v == logical_type.primary_dtype
def test_accessor_separation_of_params(sample_df):
# mix up order of accessor and schema params
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_name",
index="id",
semantic_tags={"id": "test_tag"},
time_index="signup_date",
)
assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
assert schema_df.ww.index == "id"
assert schema_df.ww.time_index == "signup_date"
assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww._schema
head_df = schema_df.head(2)
assert head_df.ww.schema is None
head_df.ww.init_with_full_schema(schema=schema)
assert head_df.ww._schema is schema
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
iloc_df = schema_df.loc[[2, 3]]
assert iloc_df.ww.schema is None
iloc_df.ww.init_with_full_schema(schema=schema)
assert iloc_df.ww._schema is schema
assert iloc_df.ww.name == "test_schema"
assert iloc_df.ww.semantic_tags["id"] == {"index", "test_tag"}
# Extra parameters do not take effect
assert isinstance(iloc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
public_methods = [
method
for method in dir(sample_df.ww)
if is_public_method(WoodworkTableAccessor, method)
]
public_methods = [
method for method in public_methods if method not in methods_to_exclude
]
method_args_dict = {
"add_semantic_tags": [{"id": "new_tag"}],
"describe": None,
"pop": ["id"],
"describe": None,
"describe_dict": None,
"drop": ["id"],
"get_valid_mi_columns": None,
"mutual_information": None,
"mutual_information_dict": None,
"remove_semantic_tags": [{"id": "new_tag"}],
"rename": [{"id": "new_id"}],
"reset_semantic_tags": None,
"select": [["Double"]],
"set_index": ["id"],
"set_time_index": ["signup_date"],
"set_types": [{"id": "Integer"}],
"to_disk": ["dir"],
"to_dictionary": None,
"value_counts": None,
"infer_temporal_frequencies": None,
}
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for method in public_methods:
func = getattr(sample_df.ww, method)
method_args = method_args_dict[method]
with pytest.raises(WoodworkNotInitError, match=error):
if method_args:
func(*method_args)
else:
func()
def test_accessor_init_errors_properties(sample_df):
props_to_exclude = ["iloc", "loc", "schema", "_dataframe"]
props = [
prop
for prop in dir(sample_df.ww)
if is_property(WoodworkTableAccessor, prop) and prop not in props_to_exclude
]
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for prop in props:
with pytest.raises(WoodworkNotInitError, match=error):
getattr(sample_df.ww, prop)
def test_init_accessor_with_schema_errors(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
schema = schema_df.ww.schema
iloc_df = schema_df.iloc[:, :-1]
assert iloc_df.ww.schema is None
error = "Provided schema must be a Woodwork.TableSchema object."
with pytest.raises(TypeError, match=error):
iloc_df.ww.init_with_full_schema(schema=int)
error = (
"Woodwork typing information is not valid for this DataFrame: "
"The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
)
with pytest.raises(ValueError, match=error):
iloc_df.ww.init_with_full_schema(schema=schema)
def test_accessor_with_schema_parameter_warning(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww.schema
head_df = schema_df.head(2)
warning = (
"A schema was provided and the following parameters were ignored: index, "
"time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
)
with pytest.warns(ParametersIgnoredWarning, match=warning):
head_df.ww.init_with_full_schema(
index="ignored_id",
time_index="ignored_time_index",
logical_types={"ignored": "ltypes"},
already_sorted=True,
semantic_tags={"ignored_id": "ignored_test_tag"},
use_standard_tags={"id": True, "age": False},
schema=schema,
)
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
schema_df = sample_df.copy()
# We can access attributes on the Accessor class before the schema is initialized
assert schema_df.ww.schema is None
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
schema_df.ww.index
schema_df.ww.init()
assert schema_df.ww.name is None
assert schema_df.ww.index is None
assert schema_df.ww.time_index is None
assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)
error = re.escape("Woodwork has no attribute 'not_present'")
with pytest.raises(AttributeError, match=error):
sample_df.ww.init()
sample_df.ww.not_present
def test_getitem(sample_df):
df = sample_df
df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={"age": "Double"},
semantic_tags={"age": {"custom_tag"}},
)
assert list(df.columns) == list(df.ww.schema.columns)
subset = ["id", "signup_date"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index == "id"
assert df_subset.ww.time_index == "signup_date"
subset = ["age", "email"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index is None
assert df_subset.ww.time_index is None
assert isinstance(df_subset.ww.logical_types["age"], Double)
assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}
subset = df.ww[[]]
assert len(subset.ww.columns) == 0
assert subset.ww.index is None
assert subset.ww.time_index is None
series = df.ww["age"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
assert isinstance(series.ww.logical_type, Double)
assert series.ww.semantic_tags == {"custom_tag", "numeric"}
series = df.ww["id"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
assert isinstance(series.ww.logical_type, Integer)
assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
sample_df.ww["age"]
def test_getitem_invalid_input(sample_df):
df = sample_df
df.ww.init()
error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww[["email", 2, 1]]
error_msg = "Column with name 'invalid_column' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww["invalid_column"]
def test_accessor_equality(sample_df):
# Confirm equality with same schema and same data
schema_df = sample_df.copy()
schema_df.ww.init()
copy_df = schema_df.ww.copy()
assert schema_df.ww == copy_df.ww
# Confirm not equal with different schema but same data
copy_df.ww.set_time_index("signup_date")
assert schema_df.ww != copy_df.ww
# Confirm not equal with same schema but different data - only pandas
loc_df = schema_df.ww.loc[:2, :]
if isinstance(sample_df, pd.DataFrame):
assert schema_df.ww != loc_df
else:
assert schema_df.ww == loc_df
def test_accessor_shallow_equality(sample_df):
metadata_table = sample_df.copy()
metadata_table.ww.init(table_metadata={"user": "user0"})
diff_metadata_table = sample_df.copy()
diff_metadata_table.ww.init(table_metadata={"user": "user2"})
assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)
schema = metadata_table.ww.schema
diff_data_table = metadata_table.ww.loc[:2, :]
same_data_table = metadata_table.ww.copy()
assert diff_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)
assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
if isinstance(sample_df, pd.DataFrame):
assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
time_index_df.ww.init(name="schema", index="id", time_index="times")
assert time_index_df.ww.name == "schema"
assert time_index_df.ww.index == "id"
assert time_index_df.ww.time_index == "times"
assert isinstance(
time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
)
def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(
name="schema", time_index="strs", logical_types={"strs": Datetime}
)
assert schema_df.ww.time_index == "ints"
assert schema_df["ints"].dtype == "datetime64[ns]"
def test_accessor_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Integer)
assert date_col.semantic_tags == {"time_index", "numeric"}
# Specify logical type for time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
date_col = schema_df.ww.columns["strs"]
assert schema_df.ww.time_index == "strs"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})
# Set numeric time index after init
schema_df = time_index_df.copy()
schema_df.ww.init(logical_types={"ints": "Double"})
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"numeric", "time_index"}
def test_numeric_time_index_dtypes(numeric_time_index_df):
numeric_time_index_df.ww.init(time_index="ints")
assert numeric_time_index_df.ww.time_index == "ints"
assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("floats")
assert numeric_time_index_df.ww.time_index == "floats"
assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("with_null")
assert numeric_time_index_df.ww.time_index == "with_null"
assert isinstance(
numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
)
assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
"time_index",
"numeric",
}
def test_accessor_init_with_invalid_string_time_index(sample_df):
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
logical_types = {"full_name": "natural_language", "age": "Double"}
schema_df = sample_df.copy()
schema_df.ww.init(name="schema", logical_types=logical_types)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, Double)
logical_types = {
"full_name": "NaturalLanguage",
"age": "IntegerNullable",
"signup_date": "Datetime",
}
schema_df = sample_df.copy()
schema_df.ww.init(
name="schema", logical_types=logical_types, time_index="signup_date"
)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
assert schema_df.ww.time_index == "signup_date"
def test_int_dtype_inference_on_init():
df = pd.DataFrame(
{
"ints_no_nans": pd.Series([1, 2]),
"ints_nan": pd.Series([1, np.nan]),
"ints_NA": pd.Series([1, pd.NA]),
"ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["ints_no_nans"].dtype == "int64"
assert df["ints_nan"].dtype == "float64"
assert df["ints_NA"].dtype == "category"
assert df["ints_NA_specified"].dtype == "Int64"
def test_bool_dtype_inference_on_init():
df = pd.DataFrame(
{
"bools_no_nans": pd.Series([True, False]),
"bool_nan": pd.Series([True, np.nan]),
"bool_NA": pd.Series([True, pd.NA]),
"bool_NA_specified": pd.Series([True, pd.NA], dtype="boolean"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["bools_no_nans"].dtype == "bool"
assert df["bool_nan"].dtype == "category"
assert df["bool_NA"].dtype == "category"
assert df["bool_NA_specified"].dtype == "boolean"
def test_str_dtype_inference_on_init():
df = pd.DataFrame(
{
"str_no_nans": pd.Series(["a", "b"]),
"str_nan": pd.Series(["a", np.nan]),
"str_NA": pd.Series(["a", pd.NA]),
"str_NA_specified": pd.Series([1, pd.NA], dtype="string"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["str_no_nans"].dtype == "category"
assert df["str_nan"].dtype == "category"
assert df["str_NA"].dtype == "category"
assert df["str_NA_specified"].dtype == "category"
def test_float_dtype_inference_on_init():
df = pd.DataFrame(
{
"floats_no_nans": pd.Series([1.1, 2.2]),
"floats_nan": pd.Series([1.1, np.nan]),
"floats_NA": pd.Series([1.1, pd.NA]),
"floats_nan_specified": pd.Series([1.1, np.nan], dtype="float"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["floats_no_nans"].dtype == "float64"
assert df["floats_nan"].dtype == "float64"
assert df["floats_NA"].dtype == "category"
assert df["floats_nan_specified"].dtype == "float64"
def test_datetime_dtype_inference_on_init():
df = pd.DataFrame(
{
"date_no_nans": pd.Series([pd.to_datetime("2020-09-01")] * 2),
"date_nan": pd.Series([pd.to_datetime("2020-09-01"), np.nan]),
"date_NA": pd.Series([pd.to_datetime("2020-09-01"), pd.NA]),
"date_NaT": pd.Series([pd.to_datetime("2020-09-01"), pd.NaT]),
"date_NA_specified": pd.Series(
[pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]"
),
}
)
df.ww.init()
assert df["date_no_nans"].dtype == "datetime64[ns]"
assert df["date_nan"].dtype == "datetime64[ns]"
assert df["date_NA"].dtype == "datetime64[ns]"
assert df["date_NaT"].dtype == "datetime64[ns]"
assert df["date_NA_specified"].dtype == "datetime64[ns]"
def test_datetime_inference_with_format_param():
df = pd.DataFrame(
{
"index": [0, 1, 2],
"dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
"ymd_special": ["2019~01~01", "2019~01~02", "2019~01~03"],
"mdy_special": pd.Series(
["3~11~2000", "3~12~2000", "3~13~2000"], dtype="string"
),
}
)
df.ww.init(
name="df_name",
logical_types={
"ymd_special": Datetime(datetime_format="%Y~%m~%d"),
"mdy_special": Datetime(datetime_format="%m~%d~%Y"),
"dates": Datetime,
},
time_index="ymd_special",
)
assert df["dates"].dtype == "datetime64[ns]"
assert df["ymd_special"].dtype == "datetime64[ns]"
assert df["mdy_special"].dtype == "datetime64[ns]"
assert df.ww.time_index == "ymd_special"
assert isinstance(df.ww["dates"].ww.logical_type, Datetime)
assert isinstance(df.ww["ymd_special"].ww.logical_type, Datetime)
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
df.ww.set_time_index("mdy_special")
assert df.ww.time_index == "mdy_special"
df = pd.DataFrame(
{
"mdy_special": pd.Series(
["3&11&2000", "3&12&2000", "3&13&2000"], dtype="string"
),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["mdy_special"].dtype == "category"
df.ww.set_types(logical_types={"mdy_special": Datetime(datetime_format="%m&%d&%Y")})
assert df["mdy_special"].dtype == "datetime64[ns]"
df.ww.set_time_index("mdy_special")
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
assert df.ww.time_index == "mdy_special"
def test_timedelta_dtype_inference_on_init():
df = pd.DataFrame(
{
"delta_no_nans": (
pd.Series([pd.to_datetime("2020-09-01")] * 2)
- pd.to_datetime("2020-07-01")
),
"delta_nan": (
pd.Series([pd.to_datetime("2020-09-01"), np.nan])
- pd.to_datetime("2020-07-01")
),
"delta_NaT": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NaT])
- pd.to_datetime("2020-07-01")
),
"delta_NA_specified": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]")
- pd.to_datetime("2020-07-01")
),
}
)
df.ww.init()
assert df["delta_no_nans"].dtype == "timedelta64[ns]"
assert df["delta_nan"].dtype == "timedelta64[ns]"
assert df["delta_NaT"].dtype == "timedelta64[ns]"
assert df["delta_NA_specified"].dtype == "timedelta64[ns]"
def test_sets_category_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
pd.Series(["a", pd.NaT, "c"], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=["a", "b", "c"]),
PostalCode,
SubRegionCode,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
if isclass(logical_type):
logical_type = logical_type()
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert df.ww.columns[column_name].logical_type == logical_type
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
df = latlong_df.loc[:, [column_name]]
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, LatLong)
assert df[column_name].dtype == LatLong.primary_dtype
df_pandas = to_pandas(df[column_name])
expected_val = (3, 4)
if _is_koalas_dataframe(latlong_df):
expected_val = [3, 4]
assert df_pandas.iloc[-1] == expected_val
def test_sets_string_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
]
logical_types = [
Address,
Filepath,
PersonFullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_boolean_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
|
pd.Series([True, np.nan, True], name=column_name)
|
pandas.Series
|
import re
import pandas as pd
import numpy as np
import time
import constants
import pickle
# Cleansing
def cleanse(text):
"""
Clean up the text a little by removing punctuation, extra spaces, new lines, etc.
    This should be run after split_to_sentences() / tokenize_by_sentence(), because it removes the
    punctuation those functions need to split the text into sentences.
    :param text: the text to clean
    :return: the cleaned text
"""
text = text.lower()
text = text.replace("'", '') # Remove apostrophes
# text = re.sub('[^\w\s]', ' ', text)
# Replace punct with a space so that when someone does something like <word comma word> you don't accidentally
# transform it into one word. We remove extra spaces in the next line.
text = re.sub('[^\w\s]', ' ', text)
text = re.sub('\\n', ' ', text)
text = re.sub(' +', ' ', text)
text = text.strip() # Small thing but remove a trailing space.
return text
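# Illustrative example of what cleanse() produces (sample input made up for demonstration):
#   cleanse("Don't worry -- it's fine!\n")  ->  'dont worry its fine'
# Apostrophes are dropped, other punctuation becomes whitespace, and runs of spaces collapse to one.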
# Tokenization
def tokenize_string(sentence):
"""
    Cleanse the string and tokenize it into individual words.
:param sentence: a string of text (in the context of this application, most likely an individual sentence)
:return: a list of strings
"""
sentence = cleanse(sentence)
return sentence.split(' ')
# TODO: Convert contractions to the "uncontracted" two words. Ex "you'll" -> "you are".
# Would need some list of common contractions. Of course, this is language dependent.
def split_to_sentences(text):
"""
Gets a bunch of text and returns the sentences as a list. It attempts to split the text up into its component
sentences, using punctuation that typically ends a sentence (see constants.PUNCTUATION_REGEX, which at the moment is
'[.?!]'). Text that does not behave this way, for example when each line is intended to be independent,
will likely give an unexpected result.
:param text: regular text; for example the contents of a file of text
:return: a list, where each element in the list is a sentence
"""
# TODO: A way to handle text that is broken up by lines (for example, poetry); maybe allow the call to specify
# the regex.
p = re.compile(constants.PUNCTUATION_REGEX)
sentences = p.split(text)
for i in range(len(sentences)):
sentences[i] = sentences[i].strip()
if sentences[-1] == '':
sentences = sentences[:-1]
return sentences
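# Illustrative example, assuming constants.PUNCTUATION_REGEX is '[.?!]' as described above:
#   split_to_sentences("Hi there. How are you?")  ->  ['Hi there', 'How are you']
# The trailing empty string produced by the final '?' is stripped off.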
def tokenize_by_sentence(text):
"""
Tokenize the text, but group words in sentences together. The input, and constraints on tokenization,
are the same as for split_to_sentences().
:param text: regular text; for example the contents of a file of text
:return: A list of lists. Each list inside the overall list is the words in a given sentence.
"""
sentences = split_to_sentences(text)
result = []
for sentence in sentences:
current_result = tokenize_string(sentence)
if current_result is not None and current_result != ['']:
result.append(current_result)
return result
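# Illustrative example (builds on split_to_sentences() and tokenize_string() above):
#   tokenize_by_sentence("Hi there. How are you?")  ->  [['hi', 'there'], ['how', 'are', 'you']]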
# Statistics
def find_word_stats(text):
"""
    Get statistics on word frequencies. For each word this reports how many times it occurred in the text, the
    fraction of total words that it represents, and the cumulative count and cumulative ratio of all words,
    where "cumulative" means counting that word together with all the more frequent words above it.
    :param text: regular text; for example the contents of a file of text
:return: a DataFrame, sorted by most common words first
"""
tokens = tokenize_string(text)
tokens_pd =
|
pd.Series(tokens)
|
pandas.Series
|
import datetime
import random
from typing import List
import pandas as pd
from dateutil.utils import today
from numpy import array_split
from toloka.client import TolokaClient
from toloka.client.actions import RestrictionV2
from toloka.client.batch_create_results import TaskSuiteBatchCreateResult
from toloka.client.collectors import Income, SkippedInRowAssignments, MajorityVote
from toloka.client.conditions import IncomeSumForLast24Hours, SkippedInRowCount, TotalAnswersCount, \
CorrectAnswersRate
from toloka.client.filter import FilterAnd, FilterOr, RegionByPhone, Languages, \
ClientType
from toloka.client.owner import Owner
from toloka.client.pool import Pool
from toloka.client.primitives.operators import InclusionOperator, IdentityOperator, CompareOperator
from toloka.client.quality_control import QualityControl
from toloka.client.task import BaseTask
from toloka.client.task_suite import TaskSuite
from toloka.client.user_restriction import UserRestriction, DurationUnit
from config import TOKEN_YANDEX_TOLOKA, YANDEX_TOLOKA_PROJECT_ID
REWARD_PER_ASSIGNMENT = 0.01
TASKS_PER_TASK_SUITE = 20
TRAINING_PASSING_SKILL_VALUE = 90
OVERLAP = 20
TYPE_ORIGINAL = 'original'
TYPE_ANALOG = 'analog'
TYPE_RANDOM = 'random'
class YToloka:
client: TolokaClient = None
def get_or_create_pool(self, pool_type: str) -> Pool:
self._check_auth()
project_id = YANDEX_TOLOKA_PROJECT_ID
all_pools = self.client.get_pools(project_id=project_id)
pool = next(filter(lambda p: p.private_comment == pool_type, all_pools), None)
if pool is None:
pool = self.client.create_pool(self._get_pool(project_id, pool_type))
return pool
def upload_tasks(self, pool_type: str, tasks: List[BaseTask]) -> TaskSuiteBatchCreateResult:
self._check_auth()
pool_id = self._get_pool_id(pool_type)
task_suites: List[TaskSuite] = []
n_suites = (len(tasks) + TASKS_PER_TASK_SUITE - 1) // TASKS_PER_TASK_SUITE
for tasks_batch in array_split(tasks, n_suites):
task_suites.append(TaskSuite(
overlap=OVERLAP,
pool_id=pool_id,
tasks=tasks_batch.tolist()
))
return self.client.create_task_suites(task_suites=task_suites)
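    # Illustrative note on the chunking above: n_suites is a ceiling division, so with
    # TASKS_PER_TASK_SUITE = 20 a list of 41 tasks yields n_suites = 3, and array_split
    # produces task suites of 14, 14 and 13 tasks, each uploaded with overlap = OVERLAP.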
def upload_tasks_analog(self) -> TaskSuiteBatchCreateResult:
df = pd.read_csv('my_vs_another.csv')
if len(df) == 0:
return TaskSuiteBatchCreateResult()
tasks: List[BaseTask] = []
for _, row in df.iterrows():
tasks.append(BaseTask(input_values={
'image_first': row['cover_generated_my'],
'image_second': row['cover_generated_another'],
'audio_path': row['audio']
}))
return self.upload_tasks(TYPE_ANALOG, tasks)
def upload_tasks_original(self) -> TaskSuiteBatchCreateResult:
df = pd.read_csv('my_vs_original.csv')
if len(df) == 0:
return TaskSuiteBatchCreateResult()
tasks: List[BaseTask] = []
for _, row in df.iterrows():
tasks.append(BaseTask(input_values={
'image_first': row['cover_original'],
'image_second': row['cover_generated'],
'audio_path': row['audio']
}))
return self.upload_tasks(TYPE_ORIGINAL, tasks)
def upload_tasks_random(self) -> TaskSuiteBatchCreateResult:
df = pd.read_csv('my_vs_original.csv')
if len(df) == 0:
return TaskSuiteBatchCreateResult()
covers = df.cover_generated.tolist()
tasks: List[BaseTask] = []
for _, row in df.iterrows():
tasks.append(BaseTask(input_values={
'image_first': row['cover_generated'],
'image_second': random.choice(covers),
'audio_path': row['audio']
}))
return self.upload_tasks(TYPE_RANDOM, tasks)
def stop_pool(self, pool_type: str):
pool_id = self._get_pool_id(pool_type)
task_suites = self.client.get_task_suites(pool_id=pool_id)
for task_suite in task_suites:
if task_suite.remaining_overlap > 0:
self.client.patch_task_suite_overlap_or_min(task_suite.id, overlap=0)
def open_pool(self, pool_type: str):
try:
self.client.open_pool(self._get_pool_id(pool_type))
except Exception as e:
print(e)
pass
@staticmethod
def filter_assigned(tracks: pd.DataFrame, assignments: pd.DataFrame) -> pd.DataFrame:
assigned_set = set(assignments['track_id'].tolist())
unassigned_tracks = []
for i, track in tracks.iterrows():
            if track['track_id'] not in assigned_set:  # keep only tracks that have not been assigned yet
unassigned_tracks.append(track)
return
|
pd.DataFrame(unassigned_tracks)
|
pandas.DataFrame
|
import addfips
import os
import pandas as pd
import datetime
ageVariables = {
'DATE': 'date_stamp',
'AGE_RANGE': 'age_group',
'AR_TOTALCASES': 'cnt_confirmed',
'AR_TOTALPERCENT': 'pct_confirmed',
'AR_NEWCASES': 'cnt_confirmed_new',
'AR_NEWPERCENT': 'pct_confirmed_new',
'AR_TOTALDEATHS' : 'cnt_death',
'AR_NEWDEATHS': 'cnt_death_new'
}
countyVariables = {
'DATE': 'date_stamp',
'COUNTY': 'us_county_fips',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSPITALIZED': 'cnt_hospitalized_new',
'TOTAL_HOSPITALIZED': 'cnt_hospitalized',
}
dailyVariables = {
'DATE': 'date_stamp',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSP': 'cnt_hospitalized_new',
'TOTAL_HOSP': 'cnt_hospitalized',
}
raceEthSexVariables = {
'Date': 'date_stamp',
'Category': 'category_type',
'Cat_Detail': 'category_name',
'CAT_DETAIL': 'category_name',
'Cat_CaseCount': 'cnt_confirmed',
'Cat_Percent': 'pct_confirmed',
'CAT_DEATHCOUNT' : 'cnt_death',
'CAT_DEATHPERCENT': 'pct_death'
}
def cleanAgeData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(ageVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Code age ranges
df['age_group'] = df['age_group'].map({ '0-10 years':'00', '11-20 years': '11', '21-30 years': '21', '31-40 years': '31', '41-50 years': '41', '51-60 years': '51', '61-70 years': '61', '71-80 years': '71', '81+ years': '81', 'Pending': '99' })
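    # e.g. '21-30 years' -> '21': the code is the lower bound of the age bracket, with '99' marking 'Pending'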
# multiply the percentages by 100
df['pct_confirmed'] = df['pct_confirmed'].apply(lambda x: round(x*100,4))
df['pct_confirmed_new'] = df['pct_confirmed_new'].apply(lambda x: round(x*100, 4))
    # cast count variables to integers
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
# reorder so that the cnt and new are always next to each other in the same order
df = df[['date_stamp', 'age_group', 'cnt_confirmed', 'cnt_confirmed_new', 'pct_confirmed', 'pct_confirmed_new', 'cnt_death', 'cnt_death_new']]
# order the records by date
df = df.sort_values(by=['date_stamp','age_group'], ascending=True)
return df
def cleanCountyData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(countyVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Copy original county value to keep the pending and out of state values
df['tn_covid_geo'] = df['us_county_fips']
# Change county name to fips code
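    # Illustrative example: af.get_county_fips('Davidson', 'Tennessee') should return '47037'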
af = addfips.AddFIPS()
fips = []
for key, value in df['us_county_fips'].items():
fips.append(af.get_county_fips(value, 'Tennessee'))
df['us_county_fips'] = fips
# Copy appropriate fips codes to covid geo
df.loc[(df['tn_covid_geo'] != 'Pending') & (df['tn_covid_geo'] != 'Out of State'), 'tn_covid_geo'] = df['us_county_fips']
df.loc[df['tn_covid_geo'] == 'Pending', 'tn_covid_geo'] = '47PEN'
df.loc[df['tn_covid_geo'] == 'Out of State', 'tn_covid_geo'] = '47OOS'
    # Format as nullable integers to allow missing values
df['cnt_total'] = df['cnt_total'].astype(pd.Int32Dtype())
df['cnt_total_new'] = df['cnt_total_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
df['cnt_confirmed_new'] = df['cnt_confirmed_new'].astype(pd.Int32Dtype())
if 'cnt_probable' in df.columns:
df['cnt_probable'] = df['cnt_probable'].astype(pd.Int32Dtype())
df['cnt_probable_new'] = df['cnt_probable_new'].astype(pd.Int32Dtype())
df['cnt_tested_pos'] = df['cnt_tested_pos'].astype(pd.Int32Dtype())
df['cnt_tested_neg'] = df['cnt_tested_neg'].astype(pd.Int32Dtype())
df['cnt_tested'] = df['cnt_tested'].astype(pd.Int32Dtype())
df['cnt_tested_new'] = df['cnt_tested_new'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_recovered_new'] = df['cnt_recovered_new'].astype(pd.Int32Dtype())
df['cnt_recovered'] = df['cnt_recovered'].astype(pd.Int32Dtype())
df['cnt_active_new'] = df['cnt_active_new'].astype(pd.Int32Dtype())
df['cnt_active'] = df['cnt_active'].astype(pd.Int32Dtype())
df['cnt_hospitalized_new'] = df['cnt_hospitalized_new'].astype(pd.Int32Dtype())
df['cnt_hospitalized'] = df['cnt_hospitalized'].astype(pd.Int32Dtype())
# reorder so that the total and new are always next to each other in the same order
if 'cnt_probable' in df.columns:
df = df[['date_stamp', 'us_county_fips', 'tn_covid_geo', 'cnt_total', 'cnt_total_new', 'cnt_confirmed', 'cnt_confirmed_new', 'cnt_probable', 'cnt_probable_new', 'cnt_active', 'cnt_active_new', 'cnt_hospitalized', 'cnt_hospitalized_new', 'cnt_recovered', 'cnt_recovered_new', 'cnt_death', 'cnt_death_new', 'cnt_tested_pos', 'cnt_tested_neg', 'cnt_tested', 'cnt_tested_new']]
else:
df = df[['date_stamp', 'us_county_fips', 'tn_covid_geo', 'cnt_total', 'cnt_total_new', 'cnt_confirmed', 'cnt_confirmed_new', 'cnt_active', 'cnt_active_new', 'cnt_hospitalized', 'cnt_hospitalized_new', 'cnt_recovered', 'cnt_recovered_new', 'cnt_death', 'cnt_death_new', 'cnt_tested_pos', 'cnt_tested_neg', 'cnt_tested', 'cnt_tested_new']]
# order the records by date
df = df.sort_values(by='date_stamp', ascending=True)
return df
def cleanDailyData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(dailyVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
    # Format as nullable integers to allow missing values
df['cnt_total'] = df['cnt_total'].astype(pd.Int32Dtype())
df['cnt_total_new'] = df['cnt_total_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
df['cnt_confirmed_new'] = df['cnt_confirmed_new'].astype(pd.Int32Dtype())
if 'cnt_probable' in df.columns:
df['cnt_probable'] = df['cnt_probable'].astype(pd.Int32Dtype())
df['cnt_probable_new'] = df['cnt_probable_new'].astype(pd.Int32Dtype())
df['cnt_tested_pos'] = df['cnt_tested_pos'].astype(pd.Int32Dtype())
df['cnt_tested_neg'] = df['cnt_tested_neg'].astype(pd.Int32Dtype())
df['cnt_tested'] = df['cnt_tested'].astype(
|
pd.Int32Dtype()
|
pandas.Int32Dtype
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
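    # Illustrative sketch of how the tasks above are laid out: with num_rows=100 and num_tasks=4
    # the task list is (0, 25), (25, 25), (50, 25), (75, 25); the first chunk is read with the
    # header row, later chunks skip start + 1 rows and reuse the first chunk's column names.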
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(
|
StringIO(data)
|
pandas.compat.StringIO
|
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from datetime import datetime, timedelta  # used to record outputs_time and measure loop run time
# matplotlib.use('Agg')
from IPython import display
#%matplotlib inline
import torch
from pprint import pprint
import itertools
from pathlib import Path
from stable_baselines3 import A2C
import sys, os
import hashlib
from finrl.neo_finrl.preprocessor.yahoodownloader import YahooDownloader
from finrl.neo_finrl.preprocessor.preprocessors import FeatureEngineer
#%load_ext autoreload
#%autoreload 2
mpl.rcParams.update({"font.size": 16})
# import sys
# sys.path.append("../FinRL-Library")
# import sys,os
# sys.path.append(os.path.dirname(os.path.realpath(".")))
import yfinance as yf
DATASETS_FULL_PATH = [
"dow_full.csv",
"nas_full.csv",
"sp_full.csv",
]
def data_split(df, start, end):
"""
    Split the dataset into a training or testing set using the date column.
    :param df: (df) pandas dataframe
    :param start: start date (inclusive)
    :param end: end date (exclusive)
    :return: (df) pandas dataframe
"""
data = df[(df.date >= start) & (df.date < end)]
data = data.sort_values(["date", "tic"], ignore_index=True)
data.index = data.date.factorize()[0]
return data
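# Illustrative example: data_split(df, '2009-01-01', '2019-01-01') keeps rows with
# '2009-01-01' <= date < '2019-01-01', sorts them by (date, tic), and factorizes the date so
# that all tickers sharing a trading day also share the same integer index value.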
def preprocess(
dataset_dir,
market_id,
start_date,
end_date,
ticker_list,
train_start,
train_end,
val_start,
val_end,
test_start,
test_end,
tech_indicators,
cache_dir,
):
ticker_list.sort()
encoder = hashlib.sha256()
encoder.update("_".join(list(ticker_list)).encode())
encoder.update("_".join(list(tech_indicators)).encode())
    cache_path = cache_dir / f"data_{market_id}_{start_date}_{end_date}_{encoder.hexdigest()}.csv"
    # Cache the raw data
if os.path.exists(cache_path):
processed_full =
|
pd.read_csv(cache_path)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
from pyalgotrade import strategy
from pyalgotrade.technical import ma
from pyalgotrade.technical import cross, highlow
from pyalgotrade import technical
from pyalgotrade.technical import vwap
from pyalgotrade.stratanalyzer import sharpe
from pandas import DataFrame
# from compiler.ast import flatten
import numpy as np
class SMACrossOver(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, smaPeriod):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.__position = None
# We'll use adjusted close values instead of regular close values.
self.setUseAdjustedValues(False)
self.__prices = feed[instrument].getPriceDataSeries()
self.__sma = ma.SMA(self.__prices, smaPeriod)
def getSMA(self):
return self.__sma
def onEnterCanceled(self, position):
self.__position = None
def onExitOk(self, position):
self.__position = None
def onExitCanceled(self, position):
# If the exit was canceled, re-submit it.
self.__position.exitMarket()
def onBars(self, bars):
# If a position was not opened, check if we should enter a long position.
if self.__position is None:
if cross.cross_above(self.__prices, self.__sma) > 0:
shares = int(self.getBroker().getCash() * 0.9 / bars[self.__instrument].getPrice())
# Enter a buy market order. The order is good till canceled.
self.__position = self.enterLong(self.__instrument, shares, True)
# Check if we have to exit the position.
elif not self.__position.exitActive() and cross.cross_below(self.__prices, self.__sma) > 0:
self.__position.exitMarket()
class VWAPMomentum(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, vwapWindowSize, threshold):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.__feed = feed
self.__threshold = threshold
self.__vwap = {}
for element in instrument:
self.__vwap[element] = vwap.VWAP(feed[element], vwapWindowSize)
self.__notional = 0
self.__count = 0
        self.__info = DataFrame(columns={'date', 'id', 'action', 'instrument', 'quantity', 'price'})  # trade record information
self.__info_matrix = []
    # Manually record trade information (taken from the order) so the data can be shown on a web page; only single trades for now, aggregated multi-trade records are not implemented yet
def addInfo(self, order):
        __date = order.getSubmitDateTime()  # time
        __action = order.getAction()  # action
        __id = order.getId()  # order id
        __instrument = order.getInstrument()
        __quantity = order.getQuantity()  # quantity
__price = order.getAvgFillPrice()
self.__info_matrix.append([__date, __id, __action, __instrument, __quantity, __price])
    # Several implementations and storage formats are possible; given portfolio data, a DataFrame with the default index was chosen, since there may be multiple orders on a single day
def getInfo(self):
_matrix = np.array(self.__info_matrix).reshape((len(self.__info_matrix), 6))
return DataFrame(
{'date': _matrix[:, 0], 'id': _matrix[:, 1], 'action': _matrix[:, 2], 'instrument': _matrix[:, 3],
'quantity': _matrix[:, 4], 'price': _matrix[:, 5]})
    # For a portfolio, take the union of all instruments' datetime series
def getDateTimeSeries(self, instrument=None):
if instrument is None:
__dateTime = DataFrame()
for element in self.__instrument:
__dateTime = __dateTime.append(self.__feed[element].getPriceDataSeries().getDateTimes())
__dateTime = __dateTime.drop_duplicates([0])
            return __dateTime.values  # note: this returns a 2-D array
return self.__feed[instrument].getPriceDataSeries().getDateTimes()
def getVWAP(self):
return self.__vwap
def onBars(self, bars):
        for element in bars.getInstruments():  # iterating self.__instrument instead could include instruments that are missing from this bar
self.__count += 1
vwap = self.__vwap[element][-1]
if vwap is None:
return
shares = self.getBroker().getShares(element)
price = bars[element].getClose()
notional = shares * price
if self.__count < 30:
print(self.__count, element, shares, notional, self.getBroker().getCash(
False), self.getBroker().getCash())
            self.__notional = notional  # remember the previous value
# print vwap,self.__notional
if price > vwap * (1 + self.__threshold) and notional < 1000000:
__order = self.marketOrder(element, 100)
                self.addInfo(__order)  # record the trade information
if (self.__count < 30):
# print "buy %s at ¥%.2f" % (element, price)
self.info("buy %s at ¥%.2f" % (element,price))
elif price < vwap * (1 - self.__threshold) and notional > 0:
__order = self.marketOrder(element, -100)
                self.addInfo(__order)  # record the trade information
if (self.__count < 30):
# print "sell %s at ¥%.2f" % (element, price)
self.info("sell %s at ¥%.2f" % (element,price))
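# Worked example of the VWAPMomentum.onBars rule above (numbers are illustrative only):
# with threshold = 0.01 and a rolling VWAP of 100.0, the strategy buys 100 shares once the
# close rises above 101.0 (while the position notional is under 1,000,000) and sells 100
# shares once the close falls below 99.0 (while any notional remains).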
class turtle(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, N1, N2):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.__feed = feed
self.__position = None
self.setUseAdjustedValues(False)
self.__prices = feed[instrument].getPriceDataSeries()
self.__high = highlow.High(self.__prices, N1, 3)
self.__low = highlow.Low(self.__prices, N2, 3)
self._count = 0
self.__info =
|
DataFrame(columns={'date', 'id', 'action', 'instrument', 'quantity', 'price'})
|
pandas.DataFrame
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
        # cannot infer dtype: the inferred dtype is int,
        # but the result is actually float because
        # unmapped values become NaN
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# multiply chunks
df = from_pandas_df(raw, chunk_size=6)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# dict type
df = from_pandas_df(raw, chunk_size=5)
r = df.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
pd.testing.assert_frame_equal(expected, result)
# test arrow_string dtype
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c1': 'arrow_string'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'arrow_string'})
pd.testing.assert_frame_equal(expected, result)
# test series
s = pd.Series(rs.randint(5, size=20))
series = from_pandas_series(s)
r = series.astype('int32')
result = r.execute().fetch()
expected = s.astype('int32')
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s, chunk_size=6)
r = series.astype('arrow_string')
result = r.execute().fetch()
expected = s.astype('arrow_string')
pd.testing.assert_series_equal(result, expected)
# test index
raw = pd.Index(rs.randint(5, size=20))
mix = from_pandas_index(raw)
r = mix.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_index_equal(result, expected)
# multiply chunks
series = from_pandas_series(s, chunk_size=6)
r = series.astype('str')
result = r.execute().fetch()
expected = s.astype('str')
pd.testing.assert_series_equal(result, expected)
# test category
raw = pd.DataFrame(rs.randint(3, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=5)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=3)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=6)
r = df.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7':
|
pd.CategoricalDtype([1, 3, 4, 2])
|
pandas.CategoricalDtype
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import requests
from CodOpY.misc import initiate_code_table
def retrieve_kazusa(taxID):
    '''Returns a table with codon usage frequencies per 1000 codons from the
Kazusa website.
Parameters
==========
taxID : int or str
taxID can be the NCBI Taxonomy ID of an organism or the latin name (or
part thereof) of an organism.
Returns
=======
pandas.core.frame.DataFrame
A dataframe containing codons, amino acid abbreviations and the
relative usage frequency per 1000 codons for each of the 64 possible
RNA codons.
'''
if type(taxID) == str:
if not taxID.isdecimal():
search_result = requests.get('http://www.kazusa.or.jp/codon/cgi-bin/spsearch.cgi?species=' + taxID.replace(' ','+') + '&c=i')
if '\nNot found\n' in search_result.text:
return 'Search term not found'
result_lines = search_result.text.split('<A')
ids = []
print('Available entries include:\n')
for line in result_lines[:-1]:
if line[1:5] == 'HREF':
idx = line.split('=')[2].split('\"')[0]
species = line.split('<I>')[1].split('</I>')[0]
if idx.isdecimal():
ids.append(idx)
print(species + ': ' + idx)
if len(ids) == 1:
print('Using unique result')
taxID = ids[0]
else:
query = input('\nPlease select a numerical ID')
if query not in ids:
return 'Invalid ID'
else:
taxID = query
base_frame = initiate_code_table()
base_frame['codon'] = base_frame['codon'].str.replace('T','U')
search_result = requests.get('http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?species=' + str(taxID))
if 'Not found' in search_result.text:
return 'Search term not found'
species = search_result.text.split('\n')[7].split('<i>')[1].split(' </i>')[0]
print('\nRetrieving data for ' + species)
if len(search_result.text.split('PRE')) == 1:
print('no codon usage data found for this taxonomy ID')
return
else:
result_table = search_result.text.split('PRE')[1].replace('\n','').replace('>','').replace('</','')
result_table = result_table.split(')')
#retrieve information from lines
codons,frequency = [],[]
for line in result_table:
#remove leading spaces
if len(line)>1:
while line[0] == ' ':
line=line[1:]
codons.append(line[:3])
frequency.append(float(line[line.find(' ')+1:line.find('(')]))
results_frame = pd.DataFrame({'codon':codons,'usage.frequency':frequency})
results_frame = base_frame.merge(results_frame, how = 'outer',on='codon')
return results_frame.sort_values(by='codon').reset_index(drop=True)
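# Illustrative usage sketch (not part of the original module): the taxonomy ID
# 4932 (Saccharomyces cerevisiae) is an example value, and the call needs
# internet access to the Kazusa website.
def _example_retrieve_kazusa():
    usage_table = retrieve_kazusa(4932)
    print(usage_table.head())
    return usage_table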
#==================================================================================================
def opt_seq(seq,diversify=['K','N','I','H','V','G','D','Y','C','F'],
diversify_range=0.2,ref_table = 'Scer',
optimise_by=['decoding.time',min]):
'''
Makes an optimised DNA sequence corresponding to an input amino acid sequence.
Parameters
==========
seq : str
        The amino acid sequence to be optimised.
diversify : list of str
A list specifying individual amino acids for which codons should be
diversified.
diversify_range : float
        The proportion by which the optimisation parameter is allowed to
        vary for diversified amino acids, relative to 1.
ref_table : str
the name of the data file containing the codon data.
optimise_by : list of str and function
        The str part of optimise_by specifies which column of the ref_table should be
        used for optimisation. The function part can be min or max and specifies
        whether the highest or lowest value should be selected for optimisation.
Returns
=======
str
Returns a DNA sequence string.
'''
#prepare package data for use
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import Data # relative-import the *package* containing the data
#import the stored data for the dataset in question
try:
with open(Data.__path__[0] + '/' + ref_table + '.csv') as read_file:
parameterset = pd.read_csv(read_file)
except:
raise ValueError('ref_table does not refer to a valid dataset.')
#define the reverse translation dictionary: which codons should be considered for which amino acid?
#if an amino acid is not in the diversify list, use the fastest codon
#if an amino acid is in the diversify list, diversify codon choice by random draw from all codons for which the diversify_range threshold applies
reverse_dict = {}
for aa in parameterset['one.letter'].unique():
codons_for_aa = parameterset.loc[parameterset['one.letter']==aa]
if aa in diversify:
if optimise_by[1] == min:
diversify_threshold = min(codons_for_aa[optimise_by[0]]) * (1 + diversify_range)
                acceptable_codons_for_aa = list(codons_for_aa.loc[codons_for_aa[optimise_by[0]]<diversify_threshold]['codon'])
elif optimise_by[1] == max:
diversify_threshold = max(codons_for_aa[optimise_by[0]]) * (1 - diversify_range)
                acceptable_codons_for_aa = list(codons_for_aa.loc[codons_for_aa[optimise_by[0]]>diversify_threshold]['codon'])
else:
return
else:
            best_value_for_aa = optimise_by[1](codons_for_aa[optimise_by[0]])
            acceptable_codons_for_aa = list(codons_for_aa.loc[codons_for_aa[optimise_by[0]]==best_value_for_aa]['codon'])
reverse_dict[aa] = acceptable_codons_for_aa
codon_seq = []
for seq_aa in seq:
codon_seq.append(random.choice(reverse_dict[seq_aa]))
return ''.join(codon_seq).replace('U','T')
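# Illustrative usage sketch (not part of the original module): optimise an
# arbitrary short peptide with the default Scer reference table, diversifying
# only lysine codons within 20% of the optimal parameter value.
def _example_opt_seq():
    peptide = 'MKTAYIAK'  # arbitrary example sequence
    return opt_seq(peptide, diversify=['K'], diversify_range=0.2)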
#==================================================================================================
def remove_RE(site, test_seq, ref_table = 'Scer',optimise_by=['decoding.time',min],suppress_not_found=False):
'''Removes restriction enzyme sites from DNA sequences without altering the encoded
amino acid sequence and while maintaining codon optimisation as much as possible.
Parameters
==========
site : str
the name of the restriction enzyme for which sites should be removed.
test_seq : str
the sequence from which sites are to be removed.
ref_table : str
the name of the reference table from which the optimisation information
is being used.
optimise_by : list of str and Function
As for opt_seq, the name of the column of ref_table from which
optimisation info is generated, and whether optimal is the minimum or
maximum.
Returns
=======
str
A DNA sequence string.
'''
#prepare package data for use
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import Data # relative-import the *package* containing the data
#import the stored data for S cerevisiae
with open(Data.__path__[0] + '/RE_List.csv') as read_file:
RE_ref =
|
pd.read_csv(read_file)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import KFold
from catboost import CatBoostRegressor
from utils import *
import argparse
from sklearn import preprocessing
import wordbatch
from wordbatch.extractors import WordBag
from wordbatch.models import FM_FTRL
import re
import string
from nltk.corpus import stopwords
class TargetEncoder:
# Adapted from https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features
def __repr__(self):
return 'TargetEncoder'
def __init__(self, cols, smoothing=1, min_samples_leaf=1, noise_level=0, keep_original=False):
self.cols = cols
self.smoothing = smoothing
self.min_samples_leaf = min_samples_leaf
self.noise_level = noise_level
self.keep_original = keep_original
@staticmethod
def add_noise(series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def encode(self, train, test, target):
for col in self.cols:
if self.keep_original:
train[col + '_te'], test[col + '_te'] = self.encode_column(train[col], test[col], target)
else:
train[col], test[col] = self.encode_column(train[col], test[col], target)
return train, test
def encode_column(self, trn_series, tst_series, target):
temp = pd.concat([trn_series, target], axis=1)
# Compute target mean
averages = temp.groupby(by=trn_series.name)[target.name].agg(["mean", "count"])
# Compute smoothing
smoothing = 1 / (1 + np.exp(-(averages["count"] - self.min_samples_leaf) / self.smoothing))
# Apply average function to all target data
prior = target.mean()
# The bigger the count the less full_avg is taken into account
averages[target.name] = prior * (1 - smoothing) + averages["mean"] * smoothing
averages.drop(['mean', 'count'], axis=1, inplace=True)
# Apply averages to trn and tst series
ft_trn_series = pd.merge(
trn_series.to_frame(trn_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=trn_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_trn_series.index = trn_series.index
ft_tst_series = pd.merge(
tst_series.to_frame(tst_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=tst_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_tst_series.index = tst_series.index
return self.add_noise(ft_trn_series, self.noise_level), self.add_noise(ft_tst_series, self.noise_level)
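# Illustrative usage sketch (hypothetical toy data, not part of the original
# script): smooth target encoding of one categorical column; unseen categories
# in the test frame fall back to the global prior.
def _example_target_encoder():
    train_df = pd.DataFrame({'cat': ['a', 'a', 'b', 'b'],
                             'deal_probability': [1.0, 0.0, 1.0, 1.0]})
    test_df = pd.DataFrame({'cat': ['a', 'b', 'c']})
    enc = TargetEncoder(cols=['cat'], smoothing=1, noise_level=0)
    return enc.encode(train_df, test_df, train_df['deal_probability'])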
def rmse(y, y0):
assert len(y) == len(y0)
return np.sqrt(np.mean(np.power((y - y0), 2)))
stopwords = {x: 1 for x in stopwords.words('russian')}
non_alphanums = re.compile(u'[^A-Za-z0-9]+')
non_alphanumpunct = re.compile(u'[^A-Za-z0-9\.?!,; \(\)\[\]\'\"\$]+')
RE_PUNCTUATION = '|'.join([re.escape(x) for x in string.punctuation])
train = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
test =
|
pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
|
pandas.read_csv
|
import torch
import torch.nn as nn
from model import WaveletLeTransform
from config.config import get_cfg
from utils import *
from solver import make_lr_scheduler, make_optimizer
from tqdm import tqdm
from torch.utils.data import DataLoader, Subset
from apex import amp
from sklearn.metrics import roc_auc_score, log_loss
import argparse
import os
from datasets.rsna_dataset import RSNAHemorrhageDS3d
from class_weighted_bce_loss import WeightedBCEWithLogitsLoss
import numpy as np
import pandas as pd
from datasets.custom_dataset import IntracranialDataset
import glob
from einops import rearrange
import gc
from torch.utils.tensorboard import SummaryWriter
import sys
from torchvision.utils import make_grid
from torchsampler.weighted_sampler import ImbalancedDatasetSampler as imb
import pandas as pd
from sklearn.utils import shuffle
import warnings
from utils.utils import EarlyStopping, test_time_augmentation
from collections import OrderedDict
from colorama import init, Fore, Back, Style
import matplotlib.pyplot as plt
import json
import torchmetrics
# from torchsampler.imbalanced_sampler import ImbalancedDatasetSampler as imb
warnings.filterwarnings('ignore')
tb = SummaryWriter("runs/wavelet")
metric = torchmetrics.Accuracy(num_classes=6,average='macro')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="",
help="config yaml path")
parser.add_argument("--load", type=str, default="./weights/RSNA_Wavelet_Transformer.pth",
help="path to model weight")
parser.add_argument("--fold", type=int, default=0,
help="fold for validation")
parser.add_argument("-ft", "--finetune", action="store_true",
help="path to model weight")
parser.add_argument("-m", "--mode", type=str, default="train",
help="model running mode (train/valid/test)")
parser.add_argument("--valid", action="store_true",
help="enable evaluation mode for validation", )
parser.add_argument("--test", action="store_true",
help="enable evaluation mode for testset")
parser.add_argument("--tta", action="store_true",
help="enable tta infer")
parser.add_argument("-d", "--debug", action="store_true",
help="enable debug mode for test")
parser.add_argument('-y', '--autocrop', action="store",
dest="autocrop", help="Autocrop", default="T")
parser.add_argument('-s', '--seed', action="store",
dest="seed", help="model seed", default="1234")
parser.add_argument('-p', '--nbags', action="store",
dest="nbags", help="Number of bags for averaging", default="0")
parser.add_argument('-e', '--epochs', action="store",
dest="epochs", help="epochs", default="5")
parser.add_argument('-j', '--start', action="store",
dest="start", help="Start epochs", default="0")
parser.add_argument('-w', '--workpath', action="store",
dest="workpath", help="Working path", default="densenetv1/weights")
parser.add_argument('-f', '--weightsname', action="store",
dest="weightsname", help="Weights file name", default="pytorch_model.bin")
parser.add_argument('-g', '--logmsg', action="store",
dest="logmsg", help="root directory", default="Recursion-pytorch")
parser.add_argument('-c', '--size', action="store",
dest="size", help="model size", default="512")
parser.add_argument('-a', '--infer', action="store",
dest="infer", help="root directory", default="TRN")
parser.add_argument('-z', '--wtsize', action="store",
dest="wtsize", help="model size", default="999")
parser.add_argument('-hf', '--hflip', action="store",
dest="hflip", help="Augmentation - Embedding horizontal flip", default="F")
parser.add_argument('-tp', '--transpose', action="store",
dest="transpose", help="Augmentation - Embedding transpose", default="F")
parser.add_argument('-xg', '--stage2', action="store",
dest="stage2", help="Stage2 embeddings only", default="F")
args = parser.parse_args()
if args.valid:
args.mode = "valid"
elif args.test:
args.mode = "test"
return args
def build_model(cfg):
model = WaveletLeTransform(cfg.MODEL.WL_CHNS, cfg.MODEL.CONV_CHNS, cfg.MODEL.LEVELS)
return model
def dataloader(cfg, df, autocrop, hflip, transpose, class_props, intra_class_props, mode='train'):
DataSet = IntracranialDataset
data_loader = ""
if mode == 'train':
train_img_path = os.path.join(cfg.DIRS.DATA, cfg.DIRS.TRAIN)
train_ds = DataSet(cfg, df, train_img_path, labels=True, AUTOCROP=autocrop, HFLIP=hflip, TRANSPOSE=transpose,
mode="train")
if cfg.DEBUG:
train_ds = Subset(train_ds, np.random.choice(np.arange(len(train_ds)), 500))
data_loader = DataLoader(train_ds, 4, pin_memory=True, shuffle=True, drop_last=False, num_workers=1)
elif mode == 'valid':
valid_img_path = os.path.join(cfg.DIRS.DATA, cfg.DIRS.TRAIN)
valid_ds = DataSet(cfg, df, valid_img_path, labels=True, AUTOCROP=autocrop, HFLIP=hflip, TRANSPOSE=transpose,
mode="valid")
if cfg.DEBUG:
valid_ds = Subset(valid_ds, np.random.choice(np.arange(len(valid_ds)), 500))
data_loader = DataLoader(valid_ds, 4, pin_memory=True, shuffle=False, drop_last=False, num_workers=2)
# test_img_path = os.path.join(cfg.DIRS.DATA,cfg.DIRS.TEST)
# test_ds = DataSet(cfg,test,test_img_path,labels=True,AUTOCROP=autocrop,HFLIP=hflip,TRANSPOSE=transpose, mode="test")
return data_loader
def weighted_multi_label_logloss(criterion, prediction, target, weights):
assert target.size() == prediction.size()
assert weights.shape[0] == target.size(1)
loss = 0
for i in range(target.size(1)):
loss += weights[i] * criterion(prediction[:, i], target[:, i])
return loss
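# Illustrative sketch (random tensors, not part of the original training code):
# how the weighted multi-label log-loss above combines per-class BCE terms, with
# the 'any' class weighted twice as heavily as the five subtype classes.
def _example_weighted_logloss():
    criterion = nn.BCEWithLogitsLoss()
    logits = torch.randn(4, 6)                    # batch of 4 samples, 6 classes
    labels = torch.randint(0, 2, (4, 6)).float()
    weights = np.array([2, 1, 1, 1, 1, 1]) / 7.0  # normalised as in train_loop
    return weighted_multi_label_logloss(criterion, logits, labels, weights)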
def train_loop(_print, cfg, train_criterion, valid_criterion, valid_criterion_2,class_props, intra_class_props, train, AUTOCROP, HFLIP,
TRANSPOSE, start_epoch, best_metric):
start = time.time()
# Create model
model = build_model(cfg)
scores, fold_data = [], OrderedDict()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
tl = {}
tc = {}
acc = {}
accuracy,total_correct = 0,0
optimizer = make_optimizer(cfg, model)
#optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)
# CUDA & Mixed Precision
if cfg.SYSTEM.CUDA:
model = model.cuda()
train_criterion = train_criterion.cuda()
'''
if cfg.SYSTEM.FP16:
model, optimizer = amp.initialize(models=model, optimizers=optimizer,
opt_level=cfg.SYSTEM.OPT_L,
keep_batchnorm_fp32=(True if cfg.SYSTEM.OPT_L == "O2" else None))
'''
valdf = train[train['fold'] <2].reset_index(drop=True)
trndf = train[train['fold'] >1].reset_index(drop=True)
# shuffle the train df
trndf = shuffle(trndf, random_state=48)
eno=0
# split trndf into 6 parts
#train_csvs = np.array_split(trndf, 3)
# reset the index of each dataframe
#train_csvs = [df.reset_index() for df in train_csvs]
valid_loader = dataloader(cfg, valdf, AUTOCROP, HFLIP, TRANSPOSE, class_props, intra_class_props, mode="valid")
train_loader = dataloader(cfg, trndf, AUTOCROP, HFLIP, TRANSPOSE, class_props, intra_class_props,mode="train")
#early_stopping = EarlyStopping(patience=3, verbose=True, file_path=f"{cfg.EXP}.pth")
#scheduler = make_lr_scheduler(cfg, optimizer, train_loader)
#scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.TRAIN.EPOCHS)
weights = np.array([2, 1, 1, 1, 1, 1])
weights_norm = weights / weights.sum()
#if torch.cuda.is_available():
#model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
#for k, train in enumerate(train_csvs):
train_loss, valid_loss, valid_metric = {}, {}, {}
#train_loader = dataloader(cfg, train, AUTOCROP, HFLIP, TRANSPOSE, class_props, intra_class_props,
#mode="train")
#scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.TRAIN.EPOCHS)
scheduler = make_lr_scheduler(cfg, optimizer, train_loader)
# Load checkpoint
try:
if args.load != "":
if os.path.isfile(args.load):
_print(Fore.YELLOW + f"=> loading checkpoint {args.load}")
checkpoint = torch.load(args.load, "cpu")
model.load_state_dict(checkpoint.pop('state_dict'))
if not args.finetune:
_print(Fore.GREEN + "resuming optimizer ...")
optimizer.load_state_dict(checkpoint.pop('optimizer'))
start_epoch, best_metric = checkpoint['epoch']+1, checkpoint['best_metric']
_print(f"=> loaded checkpoint '{args.load}' (epoch {checkpoint['epoch']}, best_metric: {checkpoint['best_metric']})")
#train_loss, valid_loss = checkpoint["train_loss"], checkpoint["valid_loss"]
#valid_metric = checkpoint['valid_metric']
#optimizer.load_state_dict(checkpoint['optimizer'])
#scheduler.load_state_dict(checkpoint['lr_scheduler'])
#start_epoch = checkpoint["epoch"] + 1
#early_stopping.load_state_dict(checkpoint["stopping_params"])
else:
_print(Fore.RED + f"=> no checkpoint found at '{args.load}'")
start_epoch = 0
except ValueError:
_print(Fore.RED + f"=> no checkpoint found at '{args.load}'")
start_epoch = 0
for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
_print(Fore.GREEN + f"Starting Training.....")
mixed_target = ""
lamb = ""
t_loss = 0
total_loss = 0
targets_all = []
outputs_all = []
t_losses = []
auc_results=""
losses = AverageMeter()
model.train()
tbar = tqdm(train_loader)
torch.cuda.empty_cache()
for x, batch in enumerate(tbar):
image = batch["image"].cuda()
target = batch["labels"].cuda()
image = rearrange(image, 'b w h c->b c w h')
# calculate loss
if cfg.DATA.CUTMIX:
image, target, mixed_target, lamb = cutmix_data(image, target, cfg.DATA.CM_ALPHA)
elif cfg.DATA.MIXUP:
image, target, mixed_target, lamb = mixup_data(image, target, cfg.DATA.CM_ALPHA)
output = model(image)
target, mixed_target = target.type(torch.float32), mixed_target.type(torch.float32)
t_loss = mixup_criterion(train_criterion, output, target, mixed_target, lamb)
with torch.no_grad():
#ids_all.extend(ids)
targets_all.extend(target.cpu().numpy())
outputs_all.extend(torch.sigmoid(output).cpu().numpy())
t_losses.append(t_loss.item())
#outputs_all.append(torch.softmax(outputs, dim=1).cpu().numpy())
#t_loss = weighted_multi_label_logloss(train_criterion,output,target,weights_norm)
#if cfg.SYSTEM.FP16:
#with amp.scale_loss(t_loss, optimizer) as scaled_loss:
#scaled_loss.backward()
#else:
#t_loss.backward()
#total_correct += get_num_correct(output, target)
#accuracy = metric(output.cpu(), target.type(torch.int8).cpu())
del target, mixed_target, lamb, output
gc.collect()
torch.cuda.empty_cache()
# gradient accumulation
t_loss = t_loss / cfg.OPT.GD_STEPS
total_loss+=t_loss.item()
t_loss.backward()
'''
if cfg.SYSTEM.FP16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
'''
if (x + 1) % cfg.OPT.GD_STEPS == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
# record loss
losses.update(t_loss.item() * cfg.OPT.GD_STEPS, image.size(0))
            tbar.set_description(Fore.YELLOW + f"Epoch:({epoch+1}/{cfg.TRAIN.EPOCHS}) ==> Train Loss: {losses.avg:.5f} lr: {optimizer.param_groups[-1]['lr']:.6f}")
#tbar.set_description(Fore.YELLOW + "Train loss: %.5f, learning rate: %.6f, \
#Correct Prediction: %s., Accuracy: %.3f" % (losses.avg, optimizer.param_groups[-1]['lr'],
#total_correct,accuracy))
tb.add_scalar(f"Train_Data/Train_Loss_{epoch+1}", t_loss.item(), x)
tb.add_scalar(f"Train_Data/Train_Loss_avg_{epoch+1}", losses.avg, x)
#tb.add_scalar(f"Train_Data/Accuracy_{epoch+1}", accuracy, x)
#tb.add_scalar("Data/Learning Rate", optimizer.param_groups[-1]['lr'], x)
tb.close()
result = {
'targets': np.array(targets_all),
'outputs': np.array(outputs_all),
'loss': np.sum(t_losses) / (x+1),
}
result.update(calc_auc(result['targets'], result['outputs']))
result.update(calc_logloss(result['targets'], result['outputs']))
_print(Fore.GREEN + "Train_loss: %.5f, learning rate: %.6f" % (losses.avg, optimizer.param_groups[-1]['lr']))
        _print(Fore.GREEN + f"AUC[auc:{result['auc']}, micro:{result['auc_micro']}, macro:{result['auc_macro']}]")
_print(Fore.GREEN + f"Log Loss:{result['logloss']}, Log Loss classes:{np.round(result['logloss_classes'],6)}")
#_print(Fore.GREEN + "Accuracy: %.3f, Total Correct Prediction: %s." % (accuracy, total_correct))
_print(Fore.YELLOW + f"Training Done in {(time.time() - start) // 60} minutes")
start = time.time()
_print(Fore.YELLOW + f"Validation Started....")
auc_score, val_loss = test_time_augmentation(model, valid_loader, cfg, epoch, tb, "Valid",valid_criterion_2,weights_norm,_print,valid_criterion)
_print(Fore.YELLOW + f"Validation Done in {(time.time() - start) // 60} minutes")
scores.append(auc_score)
validation_metric = val_loss.item()
running_loss = total_loss / len(train_loader)
_print(Fore.GREEN + f'Train Loss: {running_loss:.5f}, '
f'Validation Loss: {val_loss:.5f}, '
f'Validation Metric: {validation_metric:.5f}, '
f'ROC AUC: {auc_score:.5f}')
is_best = val_loss < best_metric
best_metric = min(val_loss, best_metric)
_print(Fore.YELLOW + f'Best Metric: {best_metric}')
train_loss[f"train_loss_{epoch}"] = running_loss
valid_loss[f"valid_loss_{epoch}"] = val_loss.item()
valid_metric[f"valid_metric_{epoch}"] = validation_metric
if epoch in [2,5,7,10]:
_print(Fore.YELLOW + 'Saving checkpoint........')
save_checkpoint({
"epoch": epoch,
"arch": cfg.EXP,
"state_dict": model.state_dict(),
"best_metric": best_metric,
"optimizer": optimizer.state_dict(),
}, is_best, root=cfg.DIRS.WEIGHTS, filename=f"{cfg.EXP}.pth")
_print(Fore.YELLOW + 'Checkpoint Saved!')
total_loss,total_correct,accuracy =0,0,0
'''
early_stopping(validation_metric, model, epoch=epoch, args=args, optimizer=optimizer.state_dict(),
train_loss=train_loss, valid_loss=valid_loss, valid_metric=valid_metric,
lr_scheduler=scheduler.state_dict())
if early_stopping.early_stop:
print("Early stopping")
break
'''
_print(f"Best score: {validation_metric:.5f}")
train_loss_keys = list(train_loss.keys())
train_loss_values = list(train_loss.values())
for i in range(len(train_loss_keys)):
fold_data[train_loss_keys[i]]=pd.Series(train_loss_values[i])
valid_loss_keys = list(valid_loss.keys())
valid_loss_values = list(valid_loss.values())
for i in range(len(valid_loss_keys)):
fold_data[valid_loss_keys[i]]=pd.Series(valid_loss_values[i])
valid_metric_keys = list(valid_metric.keys())
valid_metric_values = list(valid_metric.values())
for i in range(len(valid_metric_keys)):
fold_data[valid_metric_keys[i]]=pd.Series(valid_metric_values[i])
train_loss = valid_loss = valid_metric = []
print(fold_data)
metric_values = [v.iloc[-1] for k, v in fold_data.items() if 'valid_metric' in k]
for i, (ll, roc) in enumerate(zip(metric_values, scores)):
print(Fore.GREEN + f'Epoch {i+1} LogLoss: {ll:.4f}, ROC AUC: {roc:.4f}')
print(Fore.CYAN + f'CV mean ROC AUC score: {np.mean(scores):.4f}, std: {np.std(scores):.4f}')
print(Fore.CYAN + f'CV mean Log Loss: {np.mean(metric_values):.4f}, std: {np.std(metric_values):.4f}')
'''
# Make a plot
fold_data = pd.DataFrame(fold_data)
fig, ax = plt.subplots()
plt.title(f"CV Mean score: {np.mean(metric_values):.4f} +/- {np.std(metric_values):.4f}")
valid_curves = fold_data.loc[:, fold_data.columns.str.startswith('valid_loss')]
train_curves = fold_data.loc[:, fold_data.columns.str.startswith('train_loss')]
print(valid_curves,train_curves)
valid_curves.plot(ax=ax, colormap='Blues_r')
train_curves.plot(ax=ax, colormap='Reds_r')
# ax.set_ylim([np.min(train_curves.values), np.max(valid_curves.values)])
ax.tick_params(labelleft=True, labelright=True, left=True, right=True)
plt.savefig(os.path.join(cfg.DIRS.OUTPUTS, f"train_epoch_{eno}.png"))
print("Done in", (time.time() - start) // 60, "minutes")
if valid_loader is not None:
loss = valid_model(_print, cfg, model, valid_loader, valid_criterion,tb)
is_best = loss < best_metric
best_metric = min(loss, best_metric)
tb.add_scalar("Data/valid_Loss",loss.item(),epoch)
_print(f"best Metric: {best_metric}")
save_checkpoint({
"epoch": epoch,
"arch": cfg.EXP,
"state_dict": model.state_dict(),
"best_metric": best_metric,
"optimizer": optimizer.state_dict(),
}, is_best, root=cfg.DIRS.WEIGHTS, filename=f"{cfg.EXP}.pth")
'''
def calc_auc(targets, outputs):
macro = roc_auc_score(np.round(targets), outputs, average='macro')
micro = roc_auc_score(np.round(targets), outputs, average='micro')
return {
'auc': (macro + micro) / 2,
'auc_macro': macro,
'auc_micro': micro,
}
def calc_logloss(targets, outputs, eps=1e-5):
# for RSNA
try:
logloss_classes = [log_loss(np.round(targets[:,i]), np.clip(outputs[:,i], eps, 1-eps)) for i in range(6)]
except ValueError as e:
logloss_classes = [1, 1, 1, 1, 1, 1]
return {
'logloss_classes': logloss_classes,
'logloss': np.average(logloss_classes, weights=[2,1,1,1,1,1]),
}
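# Illustrative sketch (synthetic arrays, not part of the original code): calling
# the two metric helpers above on a fake batch of 8 samples and 6 classes; the
# alternating targets guarantee both labels are present in every column.
def _example_metrics():
    rng = np.random.RandomState(0)
    targets = np.tile(np.array([[0.0], [1.0]]), (4, 6))   # shape (8, 6)
    outputs = rng.rand(8, 6)
    scores = {}
    scores.update(calc_auc(targets, outputs))
    scores.update(calc_logloss(targets, outputs))
    return scores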
def write_json(new_data,details,filename='log.json'):
with open(filename,'r+') as file:
# First we load existing data into a dict.
file_data = json.load(file)
# Join new_data with file_data inside emp_details
file_data[details].append(new_data)
# Sets file's current position at offset.
file.seek(0)
# convert back to json.
json.dump(file_data, file, indent = 4)
def valid_model(_print, cfg, model, valid_loader, valid_criterion,tb):
folds = [0, 1, 2, 3, 4]
# switch to evaluate mode
model.eval()
mixed_target = ""
lamb = ""
valid_loss= ""
losses = AverageMeter()
preds = []
targets = []
tbar = tqdm(valid_loader)
with torch.no_grad():
for i, batch in enumerate(tbar):
image = batch['image'].cuda()
target = batch['labels'].cuda()
image = rearrange(image, 'b w h c->b c w h')
'''
if cfg.DATA.CUTMIX:
image, target, mixed_target, lamb = cutmix_data(image, target, cfg.DATA.CM_ALPHA)
elif cfg.DATA.MIXUP:
image, target, mixed_target, lamb = mixup_data(image, target, cfg.DATA.CM_ALPHA)
target, mixed_target = target.type(torch.float32), mixed_target.type(torch.float32)
valid_loss = mixup_criterion(valid_criterion, output, target, mixed_target, lamb)
'''
output = model(image)
preds.append(output.cpu())
targets.append(target.cpu())
preds, targets = torch.cat(preds, 0), torch.cat(targets, 0)
targets = targets.type(torch.float32)
# record loss
loss_tensor = valid_criterion(preds, targets)
val_loss = loss_tensor.sum() / valid_criterion.class_weights.sum()
any_loss = loss_tensor[0]
intraparenchymal_loss = loss_tensor[1]
intraventricular_loss = loss_tensor[2]
subarachnoid_loss = loss_tensor[3]
subdural_loss = loss_tensor[4]
epidural_loss = loss_tensor[5]
_print(
"Val. loss: %.5f - any: %.3f - intraparenchymal: %.3f - intraventricular: %.3f - subarachnoid: %.3f - subdural: %.3f - epidural: %.3f" % (
val_loss, any_loss,
intraparenchymal_loss, intraventricular_loss,
subarachnoid_loss, subdural_loss, epidural_loss))
# record AUC
auc = roc_auc_score(targets[:, 1:].numpy(), preds[:, 1:].numpy(), average=None)
_print(
"Val. AUC - intraparenchymal: %.3f - intraventricular: %.3f - subarachnoid: %.3f - subdural: %.3f - epidural: %.3f" % (
auc[0], auc[1], auc[2], auc[3], auc[4]))
return val_loss
def test_model(_print, cfg, model, test_loader):
# switch to evaluate mode
model.eval()
ids = []
probs = []
tbar = tqdm(test_loader)
with torch.no_grad():
for i, (image, id_code) in enumerate(tbar):
image = image.cuda()
id_code = list(*zip(*id_code))
bsize, seq_len, c, h, w = image.size()
image = image.view(bsize * seq_len, c, h, w)
output = model(image, seq_len)
output = torch.sigmoid(output)
probs.append(output.cpu().numpy())
ids += id_code
probs = np.concatenate(probs, 0)
submit = pd.concat([
|
pd.Series(ids)
|
pandas.Series
|
######################################################################
## DeepBiome
## - Main code
##
## July 10. 2019
## Youngwon (<EMAIL>)
##
## Reference
## - Keras (https://github.com/keras-team/keras)
######################################################################
import os
import sys
import json
import time
import numpy as np
import pandas as pd
import gc
import warnings
warnings.filterwarnings("ignore")
import logging
from sklearn.model_selection import KFold
from . import logging_daily
from . import configuration
from . import loss_and_metric
from . import readers
from . import build_network
from .utils import file_path_fold, argv_parse, taxa_selection_accuracy
import keras.backend as k
import tensorflow as tf
import copy
from ete3 import Tree, faces, AttrFace, TreeStyle, NodeStyle, CircleFace, TextFace, RectFace
import matplotlib.colors as mcolors
pd.set_option('display.float_format', lambda x: '%.03f' % x)
np.set_printoptions(formatter={'float_kind':lambda x: '%.03f' % x})
def deepbiome_train(log, network_info, path_info, number_of_fold=None,
tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum'],
max_queue_size=10, workers=1, use_multiprocessing=False,
verbose=True):
"""
Function for training the deep neural network with phylogenetic tree weight regularizer.
It uses microbiome abundance data as input and uses the phylogenetic taxonomy to guide the decision of the optimal number of layers and neurons in the deep learning architecture.
Parameters
----------
log (logging instance) :
python logging instance for logging
network_info (dictionary) :
python dictionary with network_information
path_info (dictionary):
python dictionary with path_information
number_of_fold (int):
default=None
tree_level_list (list):
name of each level of the given reference tree weights
default=['Genus', 'Family', 'Order', 'Class', 'Phylum']
max_queue_size (int):
default=10
workers (int):
default=1
use_multiprocessing (boolean):
default=False
verbose (boolean):
show the log if True
default=True
Returns
-------
test_evaluation (numpy array):
numpy array of the evaluation using testset from all fold
train_evaluation (numpy array):
numpy array of the evaluation using training from all fold
network (deepbiome network instance):
deepbiome class instance
Examples
--------
Training the deep neural network with phylogenetic tree weight regularizer.
test_evaluation, train_evaluation, network = deepbiome_train(log, network_info, path_info)
"""
if tf.__version__.startswith('2'):
gpus = tf.config.experimental.get_visible_devices(device_type='GPU')
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except: pass
else:
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
### Argument #########################################################################################
model_save_dir = path_info['model_info']['model_dir']
model_path = os.path.join(model_save_dir, path_info['model_info']['weight'])
try:
hist_path = os.path.join(model_save_dir, path_info['model_info']['history'])
is_save_hist = True
except:
is_save_hist = False
try:
warm_start = network_info['training_info']['warm_start'] == 'True'
warm_start_model = network_info['training_info']['warm_start_model']
except:
warm_start = False
# try: save_frequency=int(network_info['training_info']['save_frequency'])
# except: save_frequency=None
### Reader ###########################################################################################
if verbose: log.info('-----------------------------------------------------------------')
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
# TODO: fix path_info
reader = reader_class(log, path_info, verbose=verbose)
data_path = path_info['data_info']['data_path']
y_path = '%s/%s'%(data_path, path_info['data_info']['y_path'])
############################################
# Set the cross-validation
try:
idxs = np.array(pd.read_csv(path_info['data_info']['idx_path'])-1, dtype=np.int)
if number_of_fold == None:
number_of_fold = idxs.shape[1]
except:
nsample = pd.read_csv(y_path).shape[0]
if number_of_fold == None:
number_of_fold = nsample
kf = KFold(n_splits=number_of_fold, shuffle=True, random_state=12)
cv_gen = kf.split(range(nsample))
idxs = np.array([train_idx for train_idx, test_idx in cv_gen]).T
############################################
try:
count_path = path_info['data_info']['count_path']
x_list = np.array(pd.read_csv(path_info['data_info']['count_list_path'], header=None).iloc[:,0])
x_path = np.array(['%s/%s'%(count_path, x_list[fold]) for fold in range(x_list.shape[0]) if '.csv' in x_list[fold]])
except:
x_path = np.array(['%s/%s'%(data_path, path_info['data_info']['x_path']) for fold in range(number_of_fold)])
### Simulations #################################################################################
# if is_save_hist: history = []
train_evaluation = []
test_evaluation = []
# train_tot_idxs = []
# test_tot_idxs = []
starttime = time.time()
for fold in range(number_of_fold):
if verbose: log.info('-------%d simulation start!----------------------------------' % (fold+1))
foldstarttime = time.time()
### Read datasets ####################################################################################
reader.read_dataset(x_path[fold], y_path, fold)
try: fold_idxs = idxs[:,fold]
except: fold_idxs = idxs[fold]
x_train, x_test, y_train, y_test = reader.get_dataset(fold_idxs)
num_classes = reader.get_num_classes()
        ### Build network ####################################################################################
if not tf.__version__.startswith('2'): k.set_session(tf.Session(config=config))
if verbose:
log.info('-----------------------------------------------------------------')
log.info('Build network for %d simulation' % (fold+1))
network_class = getattr(build_network, network_info['model_info']['network_class'].strip())
network = network_class(network_info, path_info, log, fold, num_classes=num_classes,
tree_level_list = tree_level_list,
is_covariates=reader.is_covariates, covariate_names = reader.covariate_names, verbose=verbose)
network.model_compile() ## TODO : weight clear only (no recompile)
if warm_start:
network.load_weights(file_path_fold(warm_start_model, fold))
sys.stdout.flush()
### Training #########################################################################################
log.info('-----------------------------------------------------------------')
log.info('%d fold computing start!----------------------------------' % (fold+1))
hist = network.fit(x_train, y_train,
max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing,
model_path=file_path_fold(model_path, fold))
# if is_save_hist: history.append(hist.history)
sys.stdout.flush()
network.save_weights(file_path_fold(model_path, fold))
if verbose: log.debug('Save weight at {}'.format(file_path_fold(model_path, fold)))
if is_save_hist:
network.save_history(file_path_fold(hist_path, fold), hist.history)
if verbose: log.debug('Save history at {}'.format(file_path_fold(hist_path, fold)))
sys.stdout.flush()
# Evaluation
train_eval_res = network.evaluate(x_train, y_train)
train_evaluation.append(train_eval_res)
test_eval_res = network.evaluate(x_test, y_test)
test_evaluation.append(test_eval_res)
if not tf.__version__.startswith('2'): k.clear_session()
if verbose: log.info('Compute time : {}'.format(time.time()-foldstarttime))
if verbose: log.info('%d fold computing end!---------------------------------------------' % (fold+1))
### Summary #########################################################################################
train_evaluation = np.vstack(train_evaluation)
mean = np.mean(train_evaluation, axis=0)
std = np.std(train_evaluation, axis=0)
test_evaluation = np.vstack(test_evaluation)
test_mean = np.mean(test_evaluation, axis=0)
test_std = np.std(test_evaluation, axis=0)
if verbose:
log.info('-----------------------------------------------------------------')
log.info('Train Evaluation : %s' % np.array(['loss']+[metric.strip() for metric in network_info['model_info']['metrics'].split(',')]))
log.info(' mean : %s',mean)
log.info(' std : %s',std)
log.info('-----------------------------------------------------------------')
log.info('Test Evaluation : %s' % np.array(['loss']+[metric.strip() for metric in network_info['model_info']['metrics'].split(',')]))
log.info(' mean : %s',test_mean)
log.info(' std : %s',test_std)
log.info('-----------------------------------------------------------------')
### Save #########################################################################################
np.save(os.path.join(model_save_dir, 'train_%s'%path_info['model_info']['evaluation']),train_evaluation)
np.save(os.path.join(model_save_dir, 'test_%s'%path_info['model_info']['evaluation']),test_evaluation)
# np.save(os.path.join(model_save_dir, path_info['model_info']['train_tot_idxs']), np.array(train_tot_idxs))
# np.save(os.path.join(model_save_dir, path_info['model_info']['test_tot_idxs']), np.array(test_tot_idxs))
### Exit #########################################################################################
gc.collect()
if verbose:
log.info('Total Computing Ended')
log.info('-----------------------------------------------------------------')
return test_evaluation, train_evaluation, network
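# Illustrative configuration sketch (file names and class names below are
# hypothetical placeholders, not from the original package): these are the keys
# deepbiome_train reads directly; the chosen reader/network classes may require
# additional entries of their own.
def _example_deepbiome_train(log):
    network_info = {
        'model_info': {'reader_class': 'MicroBiomeReader',   # placeholder name
                       'network_class': 'DeepBiomeNetwork',  # placeholder name
                       'metrics': 'binary_accuracy'},
        'training_info': {'warm_start': 'False'},
    }
    path_info = {
        'model_info': {'model_dir': './results',
                       'weight': 'weight.h5',
                       'history': 'history.json',
                       'evaluation': 'evaluation.npy'},
        'data_info': {'data_path': './data',
                      'x_path': 'X.csv',
                      'y_path': 'y.csv'},
    }
    return deepbiome_train(log, network_info, path_info, number_of_fold=3)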
def deepbiome_test(log, network_info, path_info, number_of_fold=None,
tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum'],
max_queue_size=10, workers=1, use_multiprocessing=False,
verbose=True):
"""
Function for testing the pretrained deep neural network with phylogenetic tree weight regularizer.
    If you use the index file, this function provides the evaluation using the test indices (the index set not included in the index file) for each fold. If not, this function provides the evaluation using the whole samples.
Parameters
----------
log (logging instance) :
python logging instance for logging
network_info (dictionary) :
python dictionary with network_information
path_info (dictionary):
python dictionary with path_information
number_of_fold (int):
If `number_of_fold` is set as `k`, the function will test the model only with first `k` folds.
default=None
tree_level_list (list):
name of each level of the given reference tree weights
default=['Genus', 'Family', 'Order', 'Class', 'Phylum']
max_queue_size (int):
default=10
workers (int):
default=1
use_multiprocessing (boolean):
default=False
verbose (boolean):
show the log if True
default=True
Returns
-------
evaluation (numpy array):
        evaluation results on the test set as a numpy array with a shape of (number of folds, number of evaluation measures)
Examples
--------
Test the pre-trained deep neural network with phylogenetic tree weight regularizer.
evaluation = deepbiome_test(log, network_info, path_info)
"""
if tf.__version__.startswith('2'):
gpus = tf.config.experimental.get_visible_devices(device_type='GPU')
try: tf.config.experimental.set_memory_growth(gpus, True)
except: pass
else:
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
### Argument #########################################################################################
model_save_dir = path_info['model_info']['model_dir']
model_path = os.path.join(model_save_dir, path_info['model_info']['weight'])
### Reader ###########################################################################################
if verbose: log.info('-----------------------------------------------------------------')
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
reader = reader_class(log, path_info, verbose=verbose)
data_path = path_info['data_info']['data_path']
y_path = '%s/%s'%(data_path, path_info['data_info']['y_path'])
############################################
# Set the cross-validation
try:
        idxs = np.array(pd.read_csv(path_info['data_info']['idx_path'])-1, dtype=int)
if number_of_fold == None:
number_of_fold = idxs.shape[1]
except:
nsample = pd.read_csv(y_path).shape[0]
if number_of_fold == None:
number_of_fold = pd.read_csv(y_path).shape[1]
try: idxs = np.array([np.arange(nsample) for i in range(number_of_fold)]).T
except: idxs = np.array([np.arange(nsample)]).T
############################################
try:
count_path = path_info['data_info']['count_path']
x_list = np.array(pd.read_csv(path_info['data_info']['count_list_path'], header=None).iloc[:,0])
x_path = np.array(['%s/%s'%(count_path, x_list[fold]) for fold in range(x_list.shape[0]) if '.csv' in x_list[fold]])
except:
x_path = np.array(['%s/%s'%(data_path, path_info['data_info']['x_path']) for fold in range(number_of_fold)])
### Simulations #################################################################################
train_evaluation = []
test_evaluation = []
starttime = time.time()
if verbose: log.info('Test Evaluation : %s' % np.array(['loss']+[metric.strip() for metric in network_info['model_info']['metrics'].split(',')]))
for fold in range(number_of_fold):
if verbose: log.info('-------%d fold test start!----------------------------------' % (fold+1))
foldstarttime = time.time()
### Read datasets ####################################################################################
reader.read_dataset(x_path[fold], y_path, fold)
x_train, x_test, y_train, y_test = reader.get_dataset(idxs[:,fold])
num_classes = reader.get_num_classes()
        ### Build network ####################################################################################
if not tf.__version__.startswith('2'): k.set_session(tf.Session(config=config))
if verbose:
log.info('-----------------------------------------------------------------')
log.info('Build network for %d fold testing' % (fold+1))
network_class = getattr(build_network, network_info['model_info']['network_class'].strip())
network = network_class(network_info, path_info, log, fold, num_classes=num_classes,
tree_level_list = tree_level_list,
is_covariates=reader.is_covariates, covariate_names = reader.covariate_names, verbose=verbose)
network.model_compile() ## TODO : weight clear only (no recompile)
network.load_weights(file_path_fold(model_path, fold), verbose=verbose)
sys.stdout.flush()
### Training #########################################################################################
if verbose:
log.info('-----------------------------------------------------------------')
log.info('%d fold computing start!----------------------------------' % (fold+1))
test_eval_res = network.evaluate(x_test, y_test)
test_evaluation.append(test_eval_res)
if not tf.__version__.startswith('2'): k.clear_session()
if verbose:
            log.info('%s' % test_eval_res)
log.info('Compute time : {}'.format(time.time()-foldstarttime))
log.info('%d fold computing end!---------------------------------------------' % (fold+1))
### Summary #########################################################################################
test_evaluation = np.vstack(test_evaluation)
mean = np.mean(test_evaluation, axis=0)
std = np.std(test_evaluation, axis=0)
gc.collect()
if verbose:
log.info('-----------------------------------------------------------------')
log.info('Test Evaluation : %s' % np.array(['loss']+[metric.strip() for metric in network_info['model_info']['metrics'].split(',')]))
log.info(' mean : %s',mean)
log.info(' std : %s',std)
log.info('-----------------------------------------------------------------')
log.info('Total Computing Ended')
log.info('-----------------------------------------------------------------')
return test_evaluation
def deepbiome_prediction(log, network_info, path_info, num_classes, number_of_fold=None,
change_weight_for_each_fold=False, get_y = False,
tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum'],
max_queue_size=10, workers=1, use_multiprocessing=False,
verbose=True):
"""
Function for prediction by the pretrained deep neural network with phylogenetic tree weight regularizer.
Parameters
----------
log (logging instance) :
python logging instance for logging
network_info (dictionary) :
python dictionary with network_information
path_info (dictionary):
python dictionary with path_information
num_classes (int):
        number of classes for the network. 0 for regression, 1 for binary classification.
number_of_fold (int):
        1) For a list of input files for repetitions, the function will predict the output of the first `number_of_fold` repetitions. If `number_of_fold` is None, then the function will predict the output of all repetitions.
        2) For one input file for cross-validation, the function will predict the output of the `k`-fold cross-validation. If `number_of_fold` is None, then the function will predict the output of the LOOCV.
default=None
change_weight_for_each_fold (boolean):
        If `True`, the weight will be changed for each fold (repetition). For example, if the given weight's name is `weight.h5`, then `weight_0.h5` will be loaded for the first fold (repetition). If `False`, the weight path in the path_info will be used for the whole prediction. For example, if the given weight's name is `weight_0.h5`, then `weight_0.h5` will be used for every fold (repetition).
default=False
get_y (boolean):
        If `True`, the function will provide a list of tuples (prediction, true output) as the output.
        default=False
tree_level_list (list):
name of each level of the given reference tree weights
default=['Genus', 'Family', 'Order', 'Class', 'Phylum']
max_queue_size (int):
default=10
workers (int):
default=1
use_multiprocessing (boolean):
default=False
verbose (boolean):
show the log if True
default=True
Returns
-------
prediction (numpy array):
        prediction using the whole dataset in the data path
Examples
--------
Prediction by the pre-trained deep neural network with phylogenetic tree weight regularizer.
    prediction = deepbiome_prediction(log, network_info, path_info, num_classes)
    For LOOCV prediction, we can use these options.
    prediction = deepbiome_prediction(log, network_info, path_info, num_classes, number_of_fold=None, change_weight_for_each_fold=True)
"""
if tf.__version__.startswith('2'):
gpus = tf.config.experimental.get_visible_devices(device_type='GPU')
try: tf.config.experimental.set_memory_growth(gpus, True)
except: pass
else:
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
### Argument #########################################################################################
model_save_dir = path_info['model_info']['model_dir']
model_path = os.path.join(model_save_dir, path_info['model_info']['weight'])
### Reader ###########################################################################################
if verbose: log.info('-----------------------------------------------------------------')
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
reader = reader_class(log, path_info, verbose=verbose)
data_path = path_info['data_info']['data_path']
if get_y: y_path = '%s/%s'%(data_path, path_info['data_info']['y_path'])
############################################################
# Set the cross-validation
try:
        idxs = np.array(pd.read_csv(path_info['data_info']['idx_path'])-1, dtype=int)
if number_of_fold == None:
number_of_fold = idxs.shape[1]
except:
# TODO: check
if number_of_fold == None:
number_of_fold = 1
idxs = None
############################################################
try:
count_path = path_info['data_info']['count_path']
x_list = np.array(pd.read_csv(path_info['data_info']['count_list_path'], header=None).iloc[:,0])
x_path = np.array(['%s/%s'%(count_path, x_list[fold]) for fold in range(x_list.shape[0]) if '.csv' in x_list[fold]])
except:
x_path = np.array(['%s/%s'%(data_path, path_info['data_info']['x_path']) for fold in range(number_of_fold)])
############################################################
starttime = time.time()
if not tf.__version__.startswith('2'): k.set_session(tf.Session(config=config))
prediction = []
for fold in range(number_of_fold):
        if verbose: log.info('-------%d-th repetition prediction start!----------------------------------' % (fold+1))
foldstarttime = time.time()
### Read datasets ####################################################################################
if get_y:
reader.read_dataset(x_path[fold], y_path, fold)
if not np.all(idxs == None):
x_train, x_test, y_train, y_test = reader.get_dataset(idxs[:,fold])
else:
x_test, _, y_test, _ = reader.get_dataset()
else:
reader.read_dataset(x_path[fold], None, fold)
if not np.all(idxs == None):
x_train, x_test = reader.get_input(idxs[:,fold])
else:
x_test, _ = reader.get_input()
        ### Build network ####################################################################################
if verbose: log.info('-----------------------------------------------------------------')
network_class = getattr(build_network, network_info['model_info']['network_class'].strip())
network = network_class(network_info, path_info, log, fold=fold, num_classes=num_classes,
tree_level_list = tree_level_list,
is_covariates=reader.is_covariates, covariate_names = reader.covariate_names, verbose=verbose)
network.model_compile()
if change_weight_for_each_fold:network.load_weights(file_path_fold(model_path, fold), verbose=verbose)
else: network.load_weights(model_path, verbose=verbose)
sys.stdout.flush()
### Training #########################################################################################
if verbose: log.info('-----------------------------------------------------------------')
pred = network.predict(x_test)
if get_y: prediction.append(np.array(list(zip(pred, y_test))))
else: prediction.append(pred)
if verbose: log.info('Compute time : {}'.format(time.time()-foldstarttime))
if verbose: log.info('%d fold computing end!---------------------------------------------' % (fold+1))
### Exit #########################################################################################
if not tf.__version__.startswith('2'): k.clear_session()
prediction = np.array(prediction)
if verbose: log.info('Total Computing Ended')
if verbose: log.info('-----------------------------------------------------------------')
gc.collect()
return prediction
def deepbiome_get_trained_weight(log, network_info, path_info, num_classes, weight_path,
tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum'],
verbose=True):
"""
    Function for extracting the trained weights of the pretrained deep neural network with phylogenetic tree weight regularizer.
Parameters
----------
log (logging instance) :
python logging instance for logging
network_info (dictionary) :
python dictionary with network_information
path_info (dictionary):
python dictionary with path_information
num_classes (int):
        number of classes for the network. 0 for regression, 1 for binary classification.
weight_path (string):
path of the model weight
tree_level_list (list):
name of each level of the given reference tree weights
default=['Genus', 'Family', 'Order', 'Class', 'Phylum']
verbose (boolean):
show the log if True
default=True
Returns
-------
list of pandas dataframe:
the trained model's weight
Examples
--------
Trained weight of the deep neural network with phylogenetic tree weight regularizer.
tree_weight_list = deepbiome_get_trained_weight(log, network_info, path_info, num_classes, weight_path)
"""
if tf.__version__.startswith('2'):
gpus = tf.config.experimental.get_visible_devices(device_type='GPU')
try: tf.config.experimental.set_memory_growth(gpus, True)
except: pass
else:
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
if not tf.__version__.startswith('2'): k.set_session(tf.Session(config=config))
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
reader = reader_class(log, path_info, verbose=verbose)
data_path = path_info['data_info']['data_path']
try:
count_path = path_info['data_info']['count_path']
x_list = np.array(pd.read_csv(path_info['data_info']['count_list_path'], header=None).iloc[:,0])
x_path = np.array(['%s/%s'%(count_path, x_list[fold]) for fold in range(x_list.shape[0]) if '.csv' in x_list[fold]])
except:
x_path = np.array(['%s/%s'%(data_path, path_info['data_info']['x_path']) for fold in range(1)])
reader.read_dataset(x_path[0], None, 0)
network_class = getattr(build_network, network_info['model_info']['network_class'].strip())
network = network_class(network_info, path_info, log, fold=0, num_classes=num_classes,
tree_level_list = tree_level_list,
is_covariates=reader.is_covariates, covariate_names = reader.covariate_names,
verbose=verbose)
network.fold = ''
network.load_weights(weight_path, verbose=False)
tree_weight_list = network.get_trained_weight()
if not tf.__version__.startswith('2'): k.clear_session()
if reader.is_covariates:
if len(tree_weight_list[-1].index) - len(reader.covariate_names) > 0:
tree_weight_list[-1].index = list(tree_weight_list[-1].index)[:-len(reader.covariate_names)] + list(reader.covariate_names)
return tree_weight_list
def deepbiome_taxa_selection_performance(log, network_info, path_info, num_classes,
true_tree_weight_list, trained_weight_path_list,
tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum'],
lvl_category_dict = None,
verbose=True):
"""
    Function for evaluating the taxa selection performance of the pretrained deep neural network with phylogenetic tree weight regularizer.
Parameters
----------
log (logging instance) :
python logging instance for logging
network_info (dictionary) :
python dictionary with network_information
path_info (dictionary):
python dictionary with path_information
num_classes (int):
        number of classes for the network. 0 for regression, 1 for binary classification.
true_tree_weight_list (ndarray):
lists of the true weight information with the shape of (k folds, number of weights)
`true_tree_weight_list[0][0]` is the true weight information between the first and second layers for the first fold. It is a numpy array with the shape of (number of nodes for the first layer, number of nodes for the second layer).
trained_weight_path_list (list):
lists of the path of trained weight for each fold.
tree_level_list (list):
name of each level of the given reference tree weights
default=['Genus', 'Family', 'Order', 'Class', 'Phylum']
verbose (boolean):
show the log if True
default=True
Returns
-------
summary (numpy array):
summary of the taxa selection performance
Examples
--------
The taxa selection performance of the trained deep neural network with phylogenetic tree weight regularizer.
    summary = deepbiome_taxa_selection_performance(log, network_info, path_info, num_classes, true_tree_weight_list, trained_weight_path_list)
"""
if tf.__version__.startswith('2'):
gpus = tf.config.experimental.get_visible_devices(device_type='GPU')
try: tf.config.experimental.set_memory_growth(gpus, True)
except: pass
else:
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
### Argument #########################################################################################
model_save_dir = path_info['model_info']['model_dir']
model_path = os.path.join(model_save_dir, path_info['model_info']['weight'])
data_path = path_info['data_info']['data_path']
starttime = time.time()
if not tf.__version__.startswith('2'): k.set_session(tf.Session(config=config))
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
reader = reader_class(log, path_info, verbose=verbose)
data_path = path_info['data_info']['data_path']
try:
count_path = path_info['data_info']['count_path']
x_list = np.array(pd.read_csv(path_info['data_info']['count_list_path'], header=None).iloc[:,0])
x_path = np.array(['%s/%s'%(count_path, x_list[fold]) for fold in range(x_list.shape[0]) if '.csv' in x_list[fold]])
except:
x_path = np.array(['%s/%s'%(data_path, path_info['data_info']['x_path']) for fold in range(1)])
reader.read_dataset(x_path[0], None, 0)
network_class = getattr(build_network, network_info['model_info']['network_class'].strip())
network = network_class(network_info, path_info, log, fold=0, num_classes=num_classes,
tree_level_list = tree_level_list,
is_covariates=reader.is_covariates, covariate_names = reader.covariate_names,
# lvl_category_dict = lvl_category_dict,
verbose=False)
prediction = []
accuracy_list = []
for fold in range(len(trained_weight_path_list)):
foldstarttime = time.time()
network.load_weights(trained_weight_path_list[fold], verbose=verbose)
tree_weight_list = network.get_trained_weight()
# true_tree_weight_list = network.load_true_tree_weight_list(path_info['data_info']['data_path'])
try:
accuracy_list.append(np.array(taxa_selection_accuracy(tree_weight_list, true_tree_weight_list[fold])))
except:
for tree_level in range(len(tree_weight_list)):
tw = tree_weight_list[tree_level]
row_setdiff = np.setdiff1d(lvl_category_dict[tree_level], tw.index)
if len(row_setdiff) > 0:
for new_row in row_setdiff:
tw = tw.append(pd.Series(0, index=tw.columns, name=new_row))
tree_weight_list[tree_level] = tw.loc[lvl_category_dict[tree_level],:]
if tree_level+1 < len(tree_weight_list):
tw = tree_weight_list[tree_level]
col_setdiff = np.setdiff1d(lvl_category_dict[tree_level+1], tw.columns)
if len(col_setdiff) > 0:
for new_col in col_setdiff:
tw[new_col] = 0
tree_weight_list[tree_level] = tw.loc[:, lvl_category_dict[tree_level+1]]
accuracy_list.append(np.array(taxa_selection_accuracy(tree_weight_list, true_tree_weight_list[fold])))
accuracy_list = np.array(accuracy_list)[:,:,1:]
for fold in range(len(trained_weight_path_list)):
tree_level = []
selected = []
true_node = []
for i in range(accuracy_list.shape[1]):
tree_tw = true_tree_weight_list[fold][i].astype(np.int32)
tree_level.append(network.tree_level_list[i])
selected.append(np.sum(np.sum(tree_tw, axis=1)>0))
true_node.append(tree_tw.shape[0])
taxa_metrics = [ms.strip() for ms in network_info['model_info']['taxa_selection_metrics'].split(',')]
metrics_names = list(np.array([['%s_mean' % ms.capitalize(), '%s_std' % ms.capitalize()] for ms in taxa_metrics]).flatten())
summary =
|
pd.DataFrame(columns=['Model','PhyloTree','No. true taxa', 'No. total taxa'] + metrics_names)
|
pandas.DataFrame
|
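For context, a minimal standalone sketch (not taken from the deepbiome source; the model name, tree levels, and values below are made up) of how a summary table with the columns completed above could be filled, one row per tree level:

import pandas as pd

metrics_names = ['Sensitivity_mean', 'Sensitivity_std']  # hypothetical metric columns
summary = pd.DataFrame(columns=['Model', 'PhyloTree', 'No. true taxa', 'No. total taxa'] + metrics_names)
summary.loc[0] = ['DeepBiome', 'Genus', 12, 48, 0.91, 0.04]   # illustrative values only
summary.loc[1] = ['DeepBiome', 'Family', 6, 24, 0.88, 0.05]
print(summary)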
# -*- coding: utf-8 -*-
"""
# @Time :2018/5/7 10:39 PM
# @Author :Xuxian
"""
import numpy as np
import pandas as pd
import lightgbm as lgb
import time
def model_train(lgb_train, num_boost_round):
params = {
'learning_rate': 0.03,
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'l2',
'sub_feature': 0.7,
'bagging_fraction': 0.7,
'bagging_freq': 1,
'min_data': 85,
'max_depth': 14,
'verbose': -1,
}
def fScore(preds, train_data):
labels = train_data.get_label()
a = np.log1p(preds) - np.log1p(labels)
score = np.power(a, 2)
return 'fScore', score.mean(), False
gbm = lgb.train(params,
lgb_train,
feval=fScore,
valid_sets=[lgb_train],
num_boost_round=num_boost_round,
verbose_eval=10, )
return gbm
def get_feature_and_target(feature, target, test):
train_vid = target[['vid']]
data = pd.merge(feature, train_vid, on='vid')
data = pd.merge(data, target, 'left', on='vid')
    # Handle abnormal values (drop problematic records)
data = data.drop(data[data['收缩压'].isnull()].index)
data = data.drop(data[data['vid'] == '7685d48685028a006c84070f68854ce1'].index, axis=0)
data = data.drop(data[data['vid'] == 'fa04c8db6d201b9f705a00c3086481b0'].index, axis=0)
data = data.drop(data[data['vid'] == 'bd0322cf42fc6c2932be451e0b54ed02'].index, axis=0)
data = data.drop(data[data['vid'] == 'de82a4130c4907cff4bfb96736674bbc'].index, axis=0)
data = data.drop(data[data['vid'] == 'd9919661f0a45fbcacc4aa2c1119c3d2'].index, axis=0)
data = data.drop(data[data['vid'] == '798d859a63044a8a5addf1f8c528629e'].index, axis=0)
data_feature = data.drop(['vid', '收缩压', '舒张压', '血清甘油三酯', '血清高密度脂蛋白', '血清低密度脂蛋白'], axis=1)
data_target = data[['收缩压', '舒张压', '血清甘油三酯', '血清高密度脂蛋白', '血清低密度脂蛋白']]
test_feature = pd.merge(feature, test[['vid']], on='vid')
return data_feature, data_target, test_feature
def get_result(data_feature, data_target, test_feature, test):
test_vid = test[['vid']]
score =
|
pd.DataFrame()
|
pandas.DataFrame
|
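As a hypothetical continuation of the snippet above (the ids and values are assumptions, not the author's code), the empty frame created by the completion would typically be filled with the test vids plus one column per predicted target:

import pandas as pd

score = pd.DataFrame()
score['vid'] = ['a1', 'b2', 'c3']         # ids taken from test_vid (assumed)
score['收缩压'] = [118.2, 125.7, 130.1]    # model predictions for one target column
print(score.head())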
"""Class to process full HydReSGeo dataset.
Note: If IRUtils.py is not available, you need to download it before the
installation of the package into the `hprocessing/` folder:
.. code:: bash
wget -P hprocessing/ https://raw.githubusercontent.com/felixriese/thermal
-image-processing/master/tiprocessing/IRUtils.py
"""
import configparser
import glob
import itertools
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from .ProcessEnviFile import (ProcessEnviFile, getEnviFile, getEnviHeader,
readEnviHeader)
from .IRUtils import getIRDataFromMultipleZones
class ProcessFullDataset():
"""
Class to process the full HydReSGeo dataset.
Parameters
----------
    hyp_hdr_path : str
        Path to the hyperspectral Envi header file (low resolution)
meas_name : str
Name of measurement
positions_hyp : dict
Dictionary with information of the positions config file for the
hyperspectral camera
positions_lwir : dict
Dictionary with information of the positions config file for the
lwir camera
zone_list : list
List of measurement zones in the image. That does not include the
spectralon (white reference). If a zone needs to be ignored, it needs
to be removed from this list.
lwir_path : str
Path to long-wave infrared (LWIR) data
    soilmoisture_path : str
        Path to soil moisture data
masks : pd.DataFrame or None
Masks for hyperspectral images
soilmode : str
Mode of the soil measurements (e.g. KW33, Lysimeter)
imageshape : tuple, optional (default= (50, 50))
Height and width of the image
time_window_width : int, optional (default=6)
Time window width to match the hyperspectral image to the soil moisture
data. The unit of the time window width is minutes.
hyp_stat_mode : str
Mode for calculating the "mean spectrum" of a hyperspectral image.
Possible values: median, mean, max, max10 (= maximum of the top 10
pixels), std.
hyp_spectralon_factor : float, optional (default=0.95)
Factor of how much solar radiation the spectralon reflects.
verbose : int, optional (default=0)
Controls the verbosity.
Todo
-----
- Add attributes to class docstring.
- Remove self.date and self.time, only use self.datetime. Remove all
unnecessary functions of self.date and self.time.
"""
def __init__(self,
hyp_hdr_path: str,
meas_name: str,
positions_hyp: dict,
positions_lwir: dict,
zone_list: list,
lwir_path: str,
soilmoisture_path: str,
masks: pd.DataFrame,
grid: tuple = (1, 1),
imageshape: tuple = (50, 50),
time_window_width: int = 6,
hyp_stat_mode: str = "median",
hyp_spectralon_factor: float = 0.95,
verbose=0):
"""Initialize ProcessDataset instance."""
self.hyp_hdr_path = hyp_hdr_path
self.meas_name = meas_name
self.positions_hyp = positions_hyp
self.positions_lwir = positions_lwir
self.zone_list = zone_list
self.lwir_path = lwir_path
self.soilmoisture_path = soilmoisture_path
self.masks = masks
self.grid = grid
self.imageshape = imageshape
self.time_window_width = time_window_width
self.hyp_stat_mode = hyp_stat_mode
self.hyp_spectralon_factor = hyp_spectralon_factor
self.verbose = verbose
# get Envi files
self.envi_hdr_highres_path = self.hyp_hdr_path[:-4] + "_highres.hdr"
self.hdr, self.envi_img = getEnviFile(self.hyp_hdr_path)
self.hdr_highres = getEnviHeader(self.envi_hdr_highres_path)
self.date, self.time = readEnviHeader(self.hdr_highres)
# set datetime TODO: remove hard-coded timezone
self.datetime = pd.to_datetime(self.date+" "+self.time+"+02:00",
utc=True)
# read out header file
self.wavelengths = self.hdr_highres["Wavelength"]
self.bbl = self.hdr_highres["bbl"]
# get measurement index
self.index_of_meas = int(np.argwhere(
positions_hyp["measurement"].values == meas_name))
self.mask = None
        # improvised solution to translate between zone1-8 and A1-D2
self.zone_dict = {
"A1": "zone1", "A2": "zone2", "B1": "zone3", "B2": "zone4",
"C1": "zone5", "C2": "zone6", "D1": "zone7", "D2": "zone8"}
def process(self) -> pd.DataFrame:
"""
Process a full dataset.
Returns
-------
pd.DataFrame
Dataframe with hyperspectral, LWIR, and soil moisture data for
one image.
"""
# set mask
if self.masks is not None:
mask_index = self.masks.index[
self.masks["measurement"] == self.meas_name].tolist()[0]
if self.index_of_meas != mask_index:
raise IOError(("positions.csv and mask.csv don't have the"
"same sequence of dates."))
self.mask = getMask(
masks=self.masks,
index_of_meas=self.index_of_meas,
imageshape=self.imageshape)
        # quick check (single band) whether the hyperspectral image is empty
if np.sum(self.envi_img[:, :, 5]) == 0:
if self.verbose:
print("Error: The hyperspectral image is empty.")
return None
# process
envi_processor = ProcessEnviFile(
image=self.envi_img,
wavelengths=self.wavelengths,
bbl=self.bbl,
zone_list=self.zone_list,
positions=self.positions_hyp,
index_of_meas=self.index_of_meas,
mask=self.mask,
grid=self.grid,
stat_mode=self.hyp_stat_mode,
spectralon_factor=self.hyp_spectralon_factor)
df_hyp = envi_processor.getMultipleSpectra()
# add datetime as column
df_hyp["datetime"] = self.datetime
# add soil moisture data
df_hyd = self.getSoilMoistureData()
df_hyd = df_hyd.drop(labels=["zone"], axis=1)
# add IR data
df_lwir = self.getLwirData()
df_lwir = df_lwir.drop(labels=["zone"], axis=1)
return pd.concat([df_hyp, df_hyd, df_lwir], axis=1)
def getSoilMoistureData(self):
"""
Get soil moisture data.
To match the dates of the soil moisture measurements and the
hyperspectral image, the timezones are converted to UTC.
Returns
-------
pd.Dataframe
Dataframe of soil moisture measurements which correspond to the
hyperspectral image of this instance.
Todo
----
- Move the CSV file read out into process-function outside this file
- Add an optional time shift correction between soil moisture data and
the hyperspectral data.
"""
soilmoisture_sensors = getUppermostSoilMoistureSensors()
# read out soil moisture data
df_sm = pd.read_csv(self.soilmoisture_path)
df_sm["timestamp"] = pd.to_datetime(df_sm["timestamp"], utc=True)
sm_dict = {"zone": [], "volSM_vol%": [], "T_C": []}
for i, sensor in enumerate(soilmoisture_sensors["number"]):
# only consider sensors in zone_list
zone = soilmoisture_sensors["zone"].iloc[i]
if self.zone_dict[zone] not in self.zone_list:
continue
# find nearest date
nearest_date, time_delta = findNearestDate(
df_sm[df_sm["sensorID"] == "T"+str(sensor)].timestamp,
self.datetime)
if time_delta > self.time_window_width / 2:
if self.verbose:
print("Warning: Could not find a soil moisture measurement"
"for sensor {0}".format(sensor))
continue
nearest_row = df_sm[(df_sm["sensorID"] == "T"+str(sensor)) &
(df_sm["timestamp"] == nearest_date)]
sm_dict["zone"].append(self.zone_dict[zone])
sm_dict["volSM_vol%"].append(nearest_row["volSM_vol%"].values[0])
sm_dict["T_C"].append(nearest_row["T_C"].values[0])
return pd.DataFrame(sm_dict)
def getLwirData(self):
"""
Get LWIR data from one of the CSV export files.
This function is based on code from another repository by the authors:
https://github.com/felixriese/thermal-image-processing
Parameters
----------
date : str
Date formatted as yyyymmdd, e.g. 20170816
time : str
Time formatted as hh-mm-ss, e.g. 13-31-40.
Returns
-------
pd.DataFrame
IR data of the current datapoint (matched to date and time)
Todo
-----
- Implement grid-wise LWIR data extraction. (For now, only zone-wise
data extraction is implemented.)
"""
# find LWIR file within the correct time window
lwir_datetime_list = []
for csvfile in glob.glob(self.lwir_path+"/ir_export_*.csv"):
csvfile_list = csvfile.split("/")[-1].split("_")
lwir_datetime = pd.to_datetime(
csvfile_list[2]+" "+csvfile_list[5][:-4].replace("-", ":") +
"+02:00", utc=True)
lwir_datetime_list.append(lwir_datetime)
nearest_date, time_delta = findNearestDate(
lwir_datetime_list, self.datetime)
# check if the nearest datetime is close enough
if time_delta > self.time_window_width / 2:
if self.verbose:
print("Warning: Did not find LWIR data.")
return pd.DataFrame({"zone": [np.nan], "mean": [np.nan],
"med": [np.nan], "std": [np.nan]})
# load LWIR CSV file
csvfile = glob.glob(self.lwir_path+"ir_export_" +
nearest_date.strftime("%Y%m%d")+"_*" +
nearest_date.tz_convert("Europe/Berlin").strftime(
"%H-%M-%S")+".csv")[0]
# get data from different zones
df_lwir_original = getIRDataFromMultipleZones(
csvpath=csvfile,
positions=self.positions_lwir.to_dict('list'),
zone_list=self.zone_list)
# The `df_lwir_original` results in one row and column names such as
# "ir_zone1_med". In the next step, one row per zone needs to be
# generated.
lwir_dict = {"zone": [], "mean": [], "med": [], "std": []}
for zone in self.zone_list:
lwir_dict["zone"].append(zone)
lwir_dict["mean"].append(
df_lwir_original["ir_"+str(zone)+"_mean"].values[0])
lwir_dict["med"].append(
df_lwir_original["ir_"+str(zone)+"_med"].values[0])
lwir_dict["std"].append(
df_lwir_original["ir_"+str(zone)+"_std"].values[0])
return pd.DataFrame(lwir_dict)
def getMask(masks, index_of_meas, imageshape=(50, 50)):
"""
Mask image with masks from mask.csv file.
Parameters
----------
masks : pd.DataFrame or None
Masks for hyperspectral images
index_of_meas : int
Index of the measurement in the file
imageshape : tuple, optional (default= (50, 50))
Height and width of the image
Returns
-------
mask : 2D numpy array
Mask in imageshape with 1 (= true value) and 0 (= mask)
"""
mask = np.ones(imageshape, dtype=int)
# define borders
start_row = int(masks["start_row"][index_of_meas])
end_row = int(masks["end_row"][index_of_meas])
start_col = int(masks["start_col"][index_of_meas])
end_col = int(masks["end_col"][index_of_meas])
# mask around borders
mask[:start_row] = 0
mask[end_row:] = 0
mask[:, :start_col] = 0
mask[:, end_col:] = 0
# bar masks
for i in range(1, 5):
wooden_bar = getWoodenBarMask(
[masks["bar"+str(i)+"_p1_x"][index_of_meas],
masks["bar"+str(i)+"_p1_y"][index_of_meas]],
[masks["bar"+str(i)+"_p2_x"][index_of_meas],
masks["bar"+str(i)+"_p2_y"][index_of_meas]],
height=masks["bar"+str(i)+"_height"][index_of_meas],
imageshape=imageshape)
mask[[x[0] for x in wooden_bar], [x[1] for x in wooden_bar]] = 0
return mask
def getWoodenBarMask(point1, point2, height, imageshape=(50, 50)):
"""
Get mask for wooden bar.
Parameters
----------
point1, point2 : list of int
Coordinates of the two points
height : int
Height/width of the bar in y (row) direction
imageshape : tuple, optional (default= (50, 50))
Height and width of the image
Returns
-------
wooden_bar : list of tuple (int, int)
List of pixels to be masked
"""
m1, c1 = getLineFromPoints(point1, point2)
m2, c2 = getLineFromPoints((point1[0] + height, point1[1]),
(point2[0] + height, point2[1]))
def woodenBarUpper(x):
return m1*x + c1
def woodenBarLower(x):
return m2*x + c2
wooden_bar = [(x, y) for (x, y) in itertools.product(
range(imageshape[0]), range(imageshape[1]))
if woodenBarLower(x) < y < woodenBarUpper(x)]
return wooden_bar
def getAllSoilMoistureSensors():
"""
Get information about the soil moisture sensors.
The sensor data is taken from the HydReSGeo dataset. For other datasets,
the dictionary `sensors` has to be modified.
Returns
-------
sensors : dict
        Sensor information consisting of number, zone, depth, and name.
"""
sensors = {
"number": [36554, 36555, 36556, 36547, 36557, 36558,
36559, 36553, 36549, 36550, 36551, 36552,
36560, 36562, 36563, 36564, 36565, 36561],
"zone": ["A1", "A1", "A1", "A2", "B1", "B1", "B1", "B2", "C1",
"C1", "C1", "C1", "C2", "D1", "D1", "D1", "D1", "D2"],
"depth": [2.5, 5.0, 10.0, 5.0, 2.5, 5.0, 10.0, 5.0, 2.5,
5.0, 10.0, 20.0, 5.0, 2.5, 5.0, 10.0, 20.0, 5.0]}
sensors["name"] = ["SM_" + str(sensors["number"][i]) + "_" +
str(sensors["zone"][i]) + "_" + str(sensors["depth"][i])
for i in range(len(sensors["number"]))]
return sensors
def getUppermostSoilMoistureSensors():
"""
    Get information about the uppermost soil moisture sensor of each zone.
    Returns
    -------
    sensors : pd.DataFrame
        Sensor information (number, zone, depth, and name) of the uppermost sensor per zone.
"""
sensors = pd.DataFrame(getAllSoilMoistureSensors())
df_temp_list = []
for zone in np.unique(sensors["zone"].values):
min_index = sensors[sensors["zone"] == zone]["depth"].values.argmin()
df_temp_list.append(sensors[sensors["zone"] == zone].iloc[min_index])
return pd.concat(df_temp_list, axis=1).T
def findNearestDate(date_list, date):
"""
Find closest datapoint of each uppermost sensor in time window.
Adapted from https://stackoverflow.com/a/32237949/3816498 .
Parameters
----------
date_list : array-like
List of dates
date : datetime
The date, to which the nearest date in `items` should be found.
Returns
-------
nearest_date : datetime
Nearest date to `date` in `date_list`
time_delta : int
Time difference in minutes
"""
nearest_date = min(date_list, key=lambda x: abs(x - date))
time_delta = (nearest_date - date).total_seconds() / 60.
return nearest_date, time_delta
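# Worked example (sketch) for findNearestDate: the returned time_delta is a
# signed difference in minutes, not an absolute value. For instance:
# >>> base = pd.to_datetime('2017-08-16 10:00', utc=True)
# >>> findNearestDate([base - pd.Timedelta('3min'), base + pd.Timedelta('10min')], base)
# (Timestamp('2017-08-16 09:57:00+0000', tz='UTC'), -3.0)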
def readConfig(config_path: str,
data_directory: str,
verbose=0) -> dict:
"""
Read config file to process a dataset.
Parameters
----------
config_path : str
Path to config file
data_directory : str
Directory of the dataset folder.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
config_dict : dict
Configuration of the processing
"""
# open config file
config = configparser.ConfigParser(allow_no_value=True)
config.read(config_path)
if verbose:
print("Config file is valid.")
if verbose > 1:
print("Config file sections: {0}.".format(config.sections()))
config_dict = {}
# read out data paths
for var in ["data_hyp", "data_lwir", "data_sm"]:
config_dict[var] = (data_directory + config["Paths"][var])
config_dict["data_output"] = config["Paths"]["data_output"]
# read out positions, ignore-csv-files, and masks
for var in ["positions_hyp", "positions_lwir",
"ignore_hyp_measurements", "ignore_hyp_fields",
"ignore_hyp_datapoints", "masks_hyp"]:
config_dict[var] = pd.read_csv(data_directory + config["Paths"][var],
sep="\s+")
if "measurement" in config_dict[var].columns:
config_dict[var]["measurement"] = config_dict[var][
"measurement"].astype("str")
# read out grid size
config_dict["grid"] = (1, 1)
if (config["Process"]["grid_rows"].isdigit() and
config["Process"]["grid_columns"].isdigit()):
config_dict["grid"] = (int(config["Process"]["grid_rows"]),
int(config["Process"]["grid_columns"]))
# read out image shape
config_dict["imageshape"] = (50, 50)
if (config["Process"]["hyp_image_rows"].isdigit() and
config["Process"]["hyp_image_columns"].isdigit()):
config_dict["imageshape"] = (
int(config["Process"]["hyp_image_rows"]),
int(config["Process"]["hyp_image_columns"]))
# read out booleans
config_dict["overwrite_csv_file"] = config["Process"].getboolean(
"overwrite_csv_file")
# read out time window width
config_dict["time_window_width"] = int(
config["Process"]["time_window_width"])
# read out hyperspectral spectralon factor
config_dict["hyp_spectralon_factor"] = float(
config["Process"]["hyp_spectralon_factor"])
# read out hyperspectral spectralon factor
config_dict["hyp_stat_mode"] = str(
config["Process"]["hyp_stat_mode"])
return config_dict
def getLineFromPoints(point1, point2):
"""
Get line parameter (y = mx +c) from two points.
Parameters
----------
point1, point2 : list of int
Coordinates of the two points
Returns
-------
m, c : float
Line parameters for y = mx +c
"""
    # m = (y2 - y1)/(x2 - x1)
m = (point2[1] - point1[1]) / (point2[0] - point1[0])
# c = y2 - m*x2
c = point2[1] - m * point2[0]
return m, c
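# Worked check (sketch) for getLineFromPoints: for point1 = (0, 1) and
# point2 = (2, 5), the slope is m = (5 - 1) / (2 - 0) = 2.0 and the intercept
# is c = 5 - 2.0 * 2 = 1.0, so y = 2x + 1 passes through both points:
# >>> getLineFromPoints((0, 1), (2, 5))
# (2.0, 1.0)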
def processHydReSGeoDataset(config_path: str,
data_directory: str,
verbose=0) -> pd.DataFrame:
"""
Process the full HydReSGeo dataset.
Parameters
----------
config_path : str
Path to config file
data_directory : str
Directory of the dataset folder.
verbose : int, optional (default=0)
Controls the verbosity.
Returns
-------
pd.DataFrame
Output data of the processing
"""
# path to the output folder
config = readConfig(config_path=config_path, data_directory=data_directory)
params = {
"positions_hyp": config["positions_hyp"],
"positions_lwir": config["positions_lwir"],
"lwir_path": config["data_lwir"],
"soilmoisture_path": config["data_sm"],
"masks": config["masks_hyp"],
"grid": config["grid"],
"imageshape": config["imageshape"],
"time_window_width": config["time_window_width"],
"verbose": verbose
}
output_list = []
if (not config["overwrite_csv_file"] and
os.path.isfile(config["data_output"])):
print("Processing not executed, file already exists.")
print("To overwrite the existing file, change the config.")
# loop through hyperspectral images
for _, hyp_header in enumerate(
tqdm(glob.glob(config["data_hyp"]+"*/*[0-9].hdr"))):
meas_name = hyp_header.split("/")[-2].replace("_hyp", "")
file_number = int(hyp_header.split("/")[-1][4:7])
zone_list = ["zone"+str(i) for i in range(1, 9)]
# ignore measurements
if verbose:
print("-"*50)
print("Processing {0} - file {1}...".format(
meas_name, file_number))
if meas_name in config["ignore_hyp_measurements"].values:
if verbose:
print("Ignoring measurement.")
continue
# ignore datapoint
if meas_name in config["ignore_hyp_datapoints"]["measurement"].values:
if file_number in config["ignore_hyp_datapoints"][
config["ignore_hyp_datapoints"]["measurement"] ==
meas_name]["filenumber"].values:
if verbose:
print("Ignoring file.")
continue
# ignore field
if meas_name in config["ignore_hyp_fields"]["measurement"].values:
if file_number in config["ignore_hyp_fields"][
config["ignore_hyp_fields"]["measurement"] ==
meas_name]["filenumber"].values:
zones_to_drop = config["ignore_hyp_fields"][
(config["ignore_hyp_fields"]["measurement"] == meas_name) &
(config["ignore_hyp_fields"]["filenumber"] == file_number)
]["zone"].values
for zone_to_drop in zones_to_drop:
zone_list.remove("zone"+str(zone_to_drop))
if verbose:
print("Removed {0} zone(s).".format(len(zones_to_drop)))
proc = ProcessFullDataset(
hyp_hdr_path=hyp_header,
meas_name=meas_name,
zone_list=zone_list,
**params)
datapoint = proc.process()
if datapoint is not None:
output_list.append(datapoint)
output_df =
|
pd.concat(output_list, axis=0, ignore_index=True)
|
pandas.concat
|
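A minimal sketch of the row-wise concatenation performed by the completion above (the column names and values are placeholders, not the real processing output):

import pandas as pd

df_a = pd.DataFrame({'datetime': ['2017-08-16 10:00'], 'volSM_vol%': [21.3]})
df_b = pd.DataFrame({'datetime': ['2017-08-16 10:10'], 'volSM_vol%': [20.9]})
output_df = pd.concat([df_a, df_b], axis=0, ignore_index=True)
print(output_df)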
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
"""
BLIS - Balancing Load of Intermittent Solar:
A characteristic-based transient power plant model
Copyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Hardcoded Inputs:
debug = False # If True, additional information is presented to the console
plotDPI = 300
omitPeriod = 0 # Number of samples to ignore (5 hours to give sufficient start-up time)
threshold = 0.001 # threshold for rounding (MW)
# General Imports:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# BLIS Imports:
from blis import defaultInputs, PowerPlant, Solar, Fuel, Battery, Grid
# ========================================================================
# Class to simulate and analyze Hybrid Renewable Energy System (HRES)
# ========================================================================
# Time Series Data
attributes_time_series = ['PowerRequest', 'PowerOutput', 'PowerRamp', 'HeatInput', 'Efficiency',
'battCharge', 'battIncrease', 'battDecrease', 'battDischargeRate', 'battChargeRate',
'battRamp',
'solarUsed', 'loadShed', 'deficit', 'gridUsed', 'CO2_produced', 'CO2_captured', 'Emissions']
# Results
attributes_results = ['demand_MWh', 'solar_MWh', 'powerOutput_MWh', 'heatInput_MWh', 'solarUsed_MWh', 'loadShed_MWh',
'gridUsed_MWh',
'fuelCost_dollars', 'LCOE', 'efficiency_pct', 'emissions_tons', 'deficit_max', 'deficit_min',
'deficit_pct_time',
'deficit_pct_energy', 'solarCurtail_pct', 'loadShed_pct_energy', 'loadShed_pct_time']
# Add time of day attributes
tod_vars = ['emissions', 'costs', 'demand']
tod_hrs = range(24)
attributes_tod = []
for var in tod_vars:
for hr in tod_hrs:
if hr < 10:
attributes_tod.append(var + '_hr0' + str(hr))
else:
attributes_tod.append(var + '_hr' + str(hr))
attributes_results = attributes_results + attributes_tod
# ========================================================================
# Class to simulate and analyze Hybrid Renewable Energy System (HRES)
# ========================================================================
class HRES:
# ========================================================================
# Initialize HRES Simulation
# ========================================================================
def __init__(self, data, plant, solar=Solar(), batt=Battery(), fuel=Fuel(), grid=Grid(), i=0.02, n=20):
# Store Inputs
self.data = data
self.solar = solar
self.batt = batt
self.fuel = fuel
self.plant = plant
self.grid = grid
        self.i = i # (fraction) Interest rate
self.n = n # (years) System lifetime
# Record number of datapoints
self.steps = len(data)
# Create pandas dataframe to hold time series performance
rows = range(self.steps)
self.perf = pd.DataFrame(data=0.0, index=rows, columns=attributes_time_series)
# ----
# Create pandas series to store results
# ----
self.results =
|
pd.Series(index=attributes_results)
|
pandas.Series
|
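A short sketch of the pattern used by the completion above: pre-allocate a results Series indexed by the attribute names and fill it later (passing dtype=float avoids the empty-Series dtype warning in newer pandas; names and values here are placeholders):

import pandas as pd

attributes_results = ['demand_MWh', 'solar_MWh', 'LCOE']
results = pd.Series(index=attributes_results, dtype=float)
results['LCOE'] = 42.0  # placeholder value
print(results)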
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import anchors
from poola import core as pool
from sklearn.metrics import auc
## Reformatting functions
##
def clean_Sanjana_data(df, guide_col='Guide', library = False):
'''
Input: 1. df: Reads dataframe with guide_col and data columns
2. guide_col: Formatted as 'library_guide_gene' (e.g. 'HGLibA_00001_A1BG')
Output: df_clean: Dataframe with columns 'Guide', 'Gene Symbol', 'Reads'
'''
df_clean = df.rename(columns={guide_col:'old_Guide'})
library_list = []
guide_list = []
gene_list = []
for i, row in enumerate(df_clean.loc[:,'old_Guide']):
split_row = row.split('_')
library = split_row[0]
library_list.append(library)
guide = split_row[1]
guide_list.append(guide)
gene = split_row[2]
gene_list.append(gene)
df_clean['Library'] =
|
pd.Series(library_list)
|
pandas.Series
|
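For comparison, a vectorised sketch (not the author's code) that derives the same three columns with a string split instead of the explicit loop:

import pandas as pd

df_clean = pd.DataFrame({'old_Guide': ['HGLibA_00001_A1BG', 'HGLibA_00002_A1CF']})
parts = df_clean['old_Guide'].str.split('_', expand=True)
df_clean['Library'] = parts[0]
df_clean['Guide'] = parts[1]
df_clean['Gene Symbol'] = parts[2]
print(df_clean)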
"""
See also: test_reindex.py:TestReindexSetIndex
"""
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
class TestSetIndex:
def test_set_index_multiindex(self):
# segfault in GH#3308
d = {"t1": [2, 2.5, 3], "t2": [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df["tuples"] = tuples
index = MultiIndex.from_tuples(df["tuples"])
# it works!
df.set_index(index)
def test_set_index_empty_column(self):
# GH#1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=["a", "m", "p", "x"],
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_index_empty_dataframe(self):
# GH#38419
df1 = DataFrame(
{"a": Series(dtype="datetime64[ns]"), "b": Series(dtype="int64"), "c": []}
)
df2 = df1.set_index(["a", "b"])
result = df2.index.to_frame().dtypes
expected = df1[["a", "b"]].dtypes
tm.assert_series_equal(result, expected)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_timezone(self):
# GH#12358
# tz-aware Series should retain the tz
idx = DatetimeIndex(["2014-01-01 10:10:10"], tz="UTC").tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]},
index=Index(di, name="index"),
)
exp.index = exp.index._with_freq(None)
tm.assert_frame_equal(res, exp)
# GH#12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
return_value = result.set_index(keys, drop=drop, inplace=True)
assert return_value is None
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH#1590
df = DataFrame({"val": [0, 1, 2], "key": ["a", "b", "c"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# plain == would give ambiguous Boolean error for containers
first_drop = (
False
if (
isinstance(keys[0], str)
and keys[0] == "A"
and isinstance(keys[1], str)
and keys[1] == "A"
)
else drop
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_construction_with_categorical_index(self):
ci =
|
tm.makeCategoricalIndex(10)
|
pandas._testing.makeCategoricalIndex
|
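Outside the pandas test suite, the private helper named in the completion above has a public counterpart; a minimal sketch of building an equivalent categorical index directly:

import pandas as pd

ci = pd.CategoricalIndex(list('aabbca'), categories=list('abc'))
df = pd.DataFrame({'A': range(6), 'B': range(6)}, index=ci)
print(df.index)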
import plotly
import json
import requests
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from io import StringIO
# Switching the pandas plotting backend to plotly
pd.options.plotting.backend = "plotly"
class globalvars_class:
def __init__(self):
self.covid_df = self.covid_pd_make()
self.covid_positive = self.covid_positive_frame()
self.database_date = self.updated_to()
self.daily_covid_positive = self.covid_positive.groupby(self.covid_positive.index).sum()
self.daily_covid_positive_saved = self.make_daily_covid_positive_saved()
self.saved_ma_list= []
def make_daily_covid_positive_saved(self):
temp = self.daily_covid_positive.copy(deep=False)
temp.rename(columns={'corona_result':'Covid-19 Positive results'}, inplace=True)
return temp
#Reading and manipulating the Covid-19 result lab data from csv
def covid_pd_make(self):
csv_lab = pd.read_csv("corona_lab_tests_ver_0090.csv")
csv_lab.index = pd.DatetimeIndex(csv_lab["result_date"])
csv_lab.drop(["result_date"], axis=1, inplace=True)
csv_lab["corona_result"][csv_lab["corona_result"] == "שלילי"] = 0
csv_lab["corona_result"][csv_lab["corona_result"] == "חיובי"] = 1
csv_lab["is_first_Test"][csv_lab["is_first_Test"] == "Yes"] = 1
csv_lab["is_first_Test"][csv_lab["is_first_Test"] == "No"] = 0
return csv_lab
    '''In this function we read and build a database from info.data.gov.il about the corona lab test results.
    The API returns JSON, and the records inside the JSON are in CSV format.
    Used query parameters: limit, get_total, resource_id, total_records, records_format
    Read more about how to execute an API query: https://docs.ckan.org/en/latest/maintaining/datastore.html#ckanext.datastore.logic.action.datastore_search
'''
def covid_pd_make_gov_api(self):
        # data.gov.il resource id for the corona lab test results
resource_id = 'dcf999c1-d394-4b57-a5e0-9d014a62e046'
limit = 0
get_total = "True"
records_format ="csv"
        # first, query only the total number of corona test records
url = "https://data.gov.il/api/3/action/datastore_search?resource_id={}&include_total={}&limit={}".format(resource_id,get_total, limit)
response = requests.get(url)
response_json = response.json()
total_records = response_json['result']["total"]
        # then fetch all records; with records_format=csv they arrive as one CSV string inside the JSON
url = "https://data.gov.il/api/3/action/datastore_search?resource_id={}&limit={}&records_format={}".format(
resource_id, total_records,records_format)
response = requests.get(url)
response_json = response.json()
records = response_json['result']['records']
        # wrap the CSV string so pandas can read it
data = StringIO(records)
df = pd.read_csv(data, sep=",", names=["_id", "test_date", "result_date", "corona_result", "lab_id",
"test_for_corona_diagnosis", "is_first_Test"])
        ## here we cut and reshape the data
df.drop(["_id"], axis=1, inplace=True)
        # result_date is the only consistently populated date column, so we set it as the datetime index
df.index =
|
pd.DatetimeIndex(df["result_date"])
|
pandas.DatetimeIndex
|
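The docstring in the snippet above describes a two-step CKAN datastore_search query: first ask only for the total record count, then fetch every record with the records returned as one CSV string. A hedged sketch of those two calls, equivalent to the URL formatting above (same resource_id as the snippet; network access and the data.gov.il API being reachable are assumed):

import requests
import pandas as pd
from io import StringIO

base = "https://data.gov.il/api/3/action/datastore_search"
resource_id = 'dcf999c1-d394-4b57-a5e0-9d014a62e046'
# Step 1: limit=0 with include_total=True returns only metadata, including the record count.
meta = requests.get(base, params={"resource_id": resource_id, "include_total": "True", "limit": 0}).json()
total_records = meta['result']['total']
# Step 2: fetch all records; with records_format=csv the records field is a single CSV string.
payload = requests.get(base, params={"resource_id": resource_id, "limit": total_records, "records_format": "csv"}).json()
df = pd.read_csv(StringIO(payload['result']['records']), header=None)
print(df.shape)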
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 15:41:19 2019
@Author: <NAME>, <NAME>
@Institution: CBDD Group, Xiangya School of Pharmaceutical Science, CSU, China
@Homepage: http://www.scbdd.com
@Mail: <EMAIL>; <EMAIL>
@Blog: https://blog.moyule.me
♥I love <NAME> forever♥
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.metrics import auc
from load import load
from matplotlib.ticker import MultipleLocator
class Enrichment(object):
def __init__(self, loadfile, label_col, Ascore_col, Dscore_col):
self.loadfile = loadfile
self.df = pd.DataFrame()
self.length = 0
self.hit_all = 0
self.scores = pd.Series()
self.score_col = Ascore_col + Dscore_col
self.Ascore = Ascore_col
self.Dscore = Dscore_col
self.label_col = label_col
def Load(self):
data = load(self.loadfile)
self.df = data
# self.df = abs(self.df)
self.length = len(self.df)
self.hit_all = len(self.df[self.df[self.label_col]==1])
self.scorers =
|
pd.Series(self.score_col)
|
pandas.Series
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Library to execute a exploratory data analysis (EDA). It is an approach to
analyzing data sets to summarize their main characteristics, often with visual
methods. Primarily EDA is for seeing what the data can tell us beyond the
formal modeling or hypothesis testing task.
@author: ucaiado
Created on 10/20/2016
"""
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
try:
import warnings
from IPython import get_ipython
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
import json
import numpy as np
import pandas as pd
import StringIO
warnings.filterwarnings('ignore', category=UserWarning,
module='matplotlib')
# Display inline matplotlib plots with IPython
get_ipython().run_line_magic('matplotlib', 'inline')
# aesthetics
sns.set_palette('deep', desat=.6)
sns.set_context(rc={'figure.figsize': (8, 4)})
sns.set_style('whitegrid')
sns.set_palette(sns.color_palette('Set2', 10))
# loading style sheet
get_ipython().run_cell('from IPython.core.display import HTML')
get_ipython().run_cell('HTML(open("ipython_style.css").read())')
except:
pass
###########################################
'''
Begin help functions
'''
def func_estimator(x):
'''
pseudo estimator to be used by poinplot
'''
return x[0]
'''
End help functions
'''
def read_logs(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy
    :param i_desired_trial: integer. the trial ID to collect data for
    :param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
        # initialize the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'duration': defaultdict(dict),
'mid': defaultdict(dict)}
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[1][1:].split('}}')[0]
s_mid = s_mid.replace("'", "")[1:]
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_mid.split(',')]]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_pos)
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
def read_logs2(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy
    :param i_desired_trial: integer. the trial ID to collect data for
    :param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
        # initialize the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'duration': defaultdict(dict),
'mid': defaultdict(dict)}
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[0].split(', position =')[0]
s_mid = s_mid.replace("'", '"').replace('None', '0')
l_mid = json.loads(s_mid)
s_mid = s_mid.replace("'", "")[1:]
l_mid = [(s_key, (float(x))) for s_key, x
in l_mid['midPrice'].iteritems()]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_mid)
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
def read_logs_to_form_spread(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy (just F21 and
F19)
    :param i_desired_trial: integer. the trial ID to collect data for
    :param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
        # initialize the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'mid': defaultdict(dict),
'duration': defaultdict(dict),
'TOB_F21': defaultdict(dict),
'TOB_F19': defaultdict(dict),
'MY_PRICES': defaultdict(dict),
'EXEC': defaultdict(dict),
'LAST_SPREAD': defaultdict(dict)} # where I am most aggres
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
l_trade_actions = ['TAKE', 'crossed_prices', 'correction_by_trade',
'HIT']
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if 'action = ' in s_aux:
s_action = row.split('action = ')[1].split(',')[0].strip()
d_aux2 = {'DI1F21': 0, 'DI1F19': 0}
if ts_date_all not in d_rtn['EXEC'][i_trial]:
d_rtn['EXEC'][i_trial][ts_date_all] = d_aux2.copy()
d_aux2 = d_rtn['EXEC'][i_trial][ts_date_all]
if s_action in l_trade_actions:
s_msgs = s_aux.split('msgs_to_env = ')[1]
for d_aux in json.loads(s_msgs.replace("'", '"')):
i_mult = 1 if d_aux['S'] == 'Buy' else -1
d_aux2[d_aux['C']] += float(d_aux['P']) * i_mult
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[0].split(', position =')[0]
s_mid = s_mid.replace("'", '"').replace('None', '0')
l_mid = json.loads(s_mid)
s_mid = s_mid.replace("'", "")[1:]
l_mid = [(s_key, (float(x))) for s_key, x
in l_mid['midPrice'].iteritems()]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_mid)
if s_mid[0] != '{':
s_mid = '{' + s_mid
d_input = json.loads(s_mid)
d_aux = d_input['TOB']['DI1F19']
d_rtn['TOB_F19'][i_trial][ts_date_all] = d_aux
d_aux = d_input['TOB']['DI1F21']
d_rtn['TOB_F21'][i_trial][ts_date_all] = d_aux
d_aux = dict(zip(['BID', 'ASK'], d_input['last_spread']))
d_rtn['LAST_SPREAD'][i_trial][ts_date_all] = d_aux
d_aux = dict(zip(['BID', 'ASK'],
[d_input['agentOrders']['agentBid'],
d_input['agentOrders']['agentAsk']]))
d_rtn['MY_PRICES'][i_trial][ts_date_all] = d_aux
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
def plot_trial(d_data, i_trades):
'''
Plots the data from logged metrics during a specific trial of a simulation.
It is designed to plot trades using D1F21, F19 and F23.
:param d_data: dict. data with the metrics used
:param i_trades: integer. number of trades in the simulation
'''
fig = plt.figure(figsize=(12, 10))
s_key = d_data['mid'].keys()[0]
majorFormatter = mpl.dates.DateFormatter('%H:%M')
###############
# Spread plot
###############
df_spread = pd.DataFrame(d_data['mid'][s_key]).T
df_spread = df_spread.resample('1min').last()
ax = plt.subplot2grid((6, 6), (4, 0), colspan=2, rowspan=2)
((df_spread['DI1F23'] - df_spread['DI1F21'])*10**2).plot(ax=ax)
ax.set_title('F23 - F21')
ax.set_ylabel('Spread')
# ax.xaxis.set_major_formatter(majorFormatter)
ax = plt.subplot2grid((6, 6), (4, 2), colspan=2, rowspan=2)
((df_spread['DI1F21'] - df_spread['DI1F19'])*10**2).plot(ax=ax)
ax.set_title('F21 - F19')
# ax.xaxis.set_major_formatter(majorFormatter)
###############
# PnL plot
###############
ax = plt.subplot2grid((6, 6), (0, 0), colspan=3, rowspan=4)
df_pnl = pd.Series(d_data['pnl'][s_key])
df_pnl = df_pnl.resample('1min').last()
df_pnl.plot(ax=ax)
ax.axhline(xmin=0, xmax=1, y=0, color='black', linestyle='dashed')
ax.set_title('PnL Curve')
ax.set_ylabel('Value')
# ax.xaxis.set_major_formatter(majorFormatter)
###############
# Position plot
###############
ax1 = plt.subplot2grid((6, 6), (0, 3), colspan=3, rowspan=2)
df_pos =
|
pd.DataFrame(d_data['position'][s_key])
|
pandas.DataFrame
|
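read_logs above accumulates its metrics into nested defaultdicts keyed by trial and timestamp, and plot_trial then turns those into pandas objects. A small self-contained illustration of that conversion (the trial number, timestamps and prices are made up):

import pandas as pd
from collections import defaultdict

d_data = {'pnl': defaultdict(dict), 'mid': defaultdict(dict)}
i_trial = 1
d_data['pnl'][i_trial][pd.Timestamp('2016-10-20 10:15:00')] = 10.0
d_data['pnl'][i_trial][pd.Timestamp('2016-10-20 10:16:00')] = 12.5
d_data['mid'][i_trial][pd.Timestamp('2016-10-20 10:15:00')] = {'DI1F21': 12.5, 'DI1F23': 12.9}
d_data['mid'][i_trial][pd.Timestamp('2016-10-20 10:16:00')] = {'DI1F21': 12.6, 'DI1F23': 13.0}
df_pnl = pd.Series(d_data['pnl'][i_trial])        # PnL curve indexed by time
df_mid = pd.DataFrame(d_data['mid'][i_trial]).T   # one column per instrument
print(df_pnl.resample('1min').last())
print(df_mid)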
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME> <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python modules
import math
import datetime
import datetime as dt
from datetime import datetime
# Third party modules
import geopy
import geopy.distance as geo_dist
import pandas as pd
TIME_FORMATS = ['%H:%M', '%H%M', '%I:%M%p', '%I%M%p', '%H:%M:%S', '%H%M%S', '%I:%M:%S%p', '%I%M%S%p']
class TrackException(Exception):
"""
Generic exception for TrackAnimation
"""
def __init__(self, msg, original_exception):
super(TrackException, self).__init__(msg + (": %s" % original_exception))
self.original_exception = original_exception
def getBearing(start_point, end_point):
"""
Calculates the bearing between two points.
Parameters
----------
start_point: geopy.Point
end_point: geopy.Point
Returns
-------
point: int
Bearing in degrees between the start and end points.
"""
start_lat = math.radians(start_point.latitude)
start_lng = math.radians(start_point.longitude)
end_lat = math.radians(end_point.latitude)
end_lng = math.radians(end_point.longitude)
d_lng = end_lng - start_lng
if abs(d_lng) > math.pi:
if d_lng > 0.0:
d_lng = -(2.0 * math.pi - d_lng)
else:
d_lng = (2.0 * math.pi + d_lng)
tan_start = math.tan(start_lat / 2.0 + math.pi / 4.0)
tan_end = math.tan(end_lat / 2.0 + math.pi / 4.0)
dPhi = math.log(tan_end / tan_start)
bearing = (math.degrees(math.atan2(d_lng, dPhi)) + 360.0) % 360.0
return bearing
def getCoordinates(start_point, end_point, distance_meters):
"""
Calculates the new coordinates between two points depending
of the specified distance and the calculated bearing.
Parameters
----------
start_point: geopy.Point
end_point: geopy.Point
distance_meters: float
Returns
-------
point: geopy.Point
A new point between the start and the end points.
"""
bearing = getBearing(start_point, end_point)
distance_km = distance_meters / 1000
d = geo_dist.VincentyDistance(kilometers=distance_km)
destination = d.destination(point=start_point, bearing=bearing)
return geopy.Point(destination.latitude, destination.longitude)
def getPointInTheMiddle(start_point, end_point, time_diff, point_idx):
"""
Calculates a new point between two points depending of the
time difference between them and the point index.
Parameters
----------
start_point: DataFrame
end_point: DataFrame
time_diff: float
point_idx: int
Point index between the start and the end points
Returns
-------
point: list
A new point between the start and the end points.
"""
time_proportion = (time_diff * point_idx) / end_point['TimeDifference'].item()
distance_proportion = end_point['Distance'].item() * time_proportion
time_diff_proportion = end_point['TimeDifference'].item() * time_proportion
speed = distance_proportion / time_diff_proportion
distance = time_diff * speed
cum_time_diff = int(start_point['CumTimeDiff'].item() + time_diff_proportion)
# date = datetime.strptime(start_point['Date'].item(), '%Y-%m-%d %H:%M:%S') + dt.timedelta(seconds=int(time_diff_proportion))
date = pd.to_datetime(start_point['Date'].astype(str), format='%Y-%m-%d %H:%M:%S') + dt.timedelta(seconds=int(time_diff_proportion))
altitude = (end_point['Altitude'].item() + start_point['Altitude'].item()) / 2
name = start_point['CodeRoute'].item()
geo_start = geopy.Point(start_point['Latitude'].item(), start_point['Longitude'].item())
geo_end = geopy.Point(end_point['Latitude'].item(), end_point['Longitude'].item())
middlePoint = getCoordinates(geo_start, geo_end, distance_proportion)
df_middlePoint = ([[name, middlePoint.latitude, middlePoint.longitude, altitude,
date, speed, int(time_diff), distance, None, cum_time_diff]])
return df_middlePoint
def rgb(value, minimum, maximum):
"""
Calculates an rgb color of a value depending of
the minimum and maximum values.
Parameters
----------
value: float or int
minimum: float or int
maximum: float or int
Returns
-------
rgb: tuple
"""
value = float(value)
minimum = float(minimum)
maximum = float(maximum)
if minimum == maximum:
ratio = 0
else:
ratio = 2 * (value - minimum) / (maximum - minimum)
b = int(max(0, 255 * (1 - ratio)))
r = int(max(0, 255 * (ratio - 1)))
g = 255 - b - r
return r/255.0, g/255.0, b/255.0
def calculateCumTimeDiff(df):
"""
Calculates the cumulative of the time difference
between points for each track of 'dfTrack'
Parameters
----------
df: DataFrame
Returns
-------
df_cum: DataFrame
"""
df = df.copy()
df_cum = pd.DataFrame()
grouped = df['CodeRoute'].unique()
for name in grouped:
df_slice = df[df['CodeRoute'] == name]
df_slice = df_slice.reset_index(drop=True)
df_slice['CumTimeDiff'] = df_slice['TimeDifference'].cumsum()
df_cum =
|
pd.concat([df_cum, df_slice])
|
pandas.concat
|
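calculateCumTimeDiff above splits the track table by route, accumulates the per-route time differences and stitches the slices back together with pd.concat. A tiny self-contained version of the same pattern with invented values:

import pandas as pd

df = pd.DataFrame({"CodeRoute": ["a", "a", "b", "b"], "TimeDifference": [5, 7, 3, 4]})
df_cum = pd.DataFrame()
for name in df["CodeRoute"].unique():
    df_slice = df[df["CodeRoute"] == name].reset_index(drop=True)
    df_slice["CumTimeDiff"] = df_slice["TimeDifference"].cumsum()   # cumulative per route
    df_cum = pd.concat([df_cum, df_slice])
print(df_cum)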
"""
Quickly subset Redcap inventories.
"""
import argparse
import pandas as pd
import sys
# import sibispy
# Reports
## 1. Reports that indicate mistakes (site check required)
# empty_marked_present
def empty_marked_present(inventory):
# -> Site should investigate why the form was marked "not missing"
return ((inventory['non_nan_count'] == 0)
& (inventory['missing'] == 0)
& (inventory['exclude'] != 1)
& (inventory['form_name'] != 'biological_mr')) # false positives
# content_marked_missing
def content_marked_missing(inventory):
# -> Missingness likely applied by mistake, should be switched to present
return ((inventory['missing'] == 1)
& (inventory['non_nan_count'] > 0)
& (inventory['exclude'] != 1))
## 2. Reports that contain possible omissions (site check recommended)
def less_content_than_max(inventory):
# -> Site should ensure that no content was omitted
# (only makes sense on some forms)
return ((inventory['non_nan_count'] > 0) &
(inventory['non_nan_count'] < inventory['non_nan_count'].max()))
def empty_unmarked(inventory):
# -> Site should double-check that these cases are actually absent, and
# mark missingness where appropriate
# (potentially better handled in check_form_groups)
# (hits "grey" circles, but not just them)
return ((inventory['non_nan_count'] == 0)
& inventory['missing'].isnull()
& (inventory['exclude'] != 1))
## 3. Reports that indicate undermarking, and can be auto-marked (site consent requested)
### 3a. Undermarking of non-missingness
def content_unmarked(inventory):
# -> Site should confirm that hits can be automatically marked "not missing"
return (inventory['non_nan_count'] > 0) & (inventory['missing'].isnull())
### 3b. Undermarking of completion
def content_not_complete(inventory):
# -> Site should confirm that hits can be automatically marked "complete"
return ((inventory['non_nan_count'] > 0)
& (inventory['complete'] < 2)
# Computed forms that will be marked Complete once other forms are
& (~inventory['form_name'].isin(['clinical', 'brief']))
)
def missing_not_complete(inventory):
# -> Site should confirm that hits can be automatically marked "complete"
return (inventory['missing'] == 1) & (inventory['complete'] < 2)
### 4. Excluded forms with content on them
def excluded_with_content(inventory):
# -> Site should either unmark exclusion, or have the content deleted
return ((inventory['exclude'] == 1)
& (inventory['non_nan_count'] > 0)
& (~inventory['form_name'].isin(['visit_date', 'clinical'])))
# Reports -- end
def get_filter_results(inventorized_data, filter_function, verbose=False):
"""
    Apply a boolean-mask-returning filter function to the data and return the matching rows.
"""
try:
index = filter_function(inventorized_data)
except KeyError as e:
if verbose:
print("Error in {}:".format(filter_function.__name__), str(e))
return None
else:
return inventorized_data.loc[index]
def parse_args(filter_choices, input_args=None):
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="Verbose operation",
action="store_true")
# parser.add_argument("-p", "--post-to-github",
# help="Post all issues to GitHub instead of stdout.",
# action="store_true")
parser.add_argument("-i", "--input",
help="Inventory file to operate on",
nargs='+',
required=True)
parser.add_argument('-o', '--output',
help="File to save filtered inventory to",
default=sys.stdout)
# `choices` in `help` courtesy of https://stackoverflow.com/a/20335589
parser.add_argument('filter', metavar='FILTER', choices=filter_choices,
help="Filter function to apply, one of following: "
"{%(choices)s}")
args = parser.parse_args(input_args)
return args
if __name__ == '__main__':
# TODO: There should be some way to auto-generate this - maybe embed the
# filters in a file, import it, then get the names of all callables?
FILTER_LIST = [
empty_marked_present,
content_marked_missing,
less_content_than_max,
empty_unmarked,
content_unmarked,
content_not_complete,
missing_not_complete,
excluded_with_content,
]
FILTERS = {x.__name__: x for x in FILTER_LIST}
args = parse_args(FILTERS.keys())
# TODO: Should explicitly assume + read in columns?
all_out = []
for filename in args.input:
data = pd.read_csv(filename)
filter_function = FILTERS[args.filter]
result = get_filter_results(data, filter_function,
verbose=args.verbose)
if result is None:
if args.verbose:
print("Filter {} failed on file {}; skipping"
.format(args.filter, filename))
elif not result.empty:
all_out.append(result)
if args.verbose:
if args.output == sys.stdout:
output_display_name = "stdout"
else:
output_display_name = args.output
print("Filter {} used on {} => {}"
.format(args.filter, filename, output_display_name))
else:
if args.verbose:
print("Filter {} used on {} => no matches, skipping."
.format(args.filter, filename))
if len(all_out) > 0:
(
|
pd.concat(all_out, sort=False)
|
pandas.concat
|
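Each filter in the snippet above returns a boolean mask over the inventory frame, and get_filter_results applies it with .loc. A hedged toy run (it assumes the filter functions and get_filter_results defined above are in scope; the inventory values are invented but use the same column names the filters expect):

import pandas as pd

inventory = pd.DataFrame({
    "form_name": ["demographics", "biological_mr", "clinical"],
    "non_nan_count": [0, 5, 3],
    "missing": [0.0, None, 1.0],
    "exclude": [0, 0, 0],
    "complete": [2, 1, 0],
})
hits = get_filter_results(inventory, empty_marked_present)
print(hits)   # only the empty-but-marked-present demographics row survives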
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import WindowAggregator
from recipe_config_loading import get_windowing_params
@pytest.fixture
def columns():
class COLUMNS:
date = "Date"
category = "country"
aggregation = "value1_avg"
return COLUMNS
@pytest.fixture
def df(columns):
co2 = [315.58, 316.39, 316.79, 316.2]
country = ["first", "first", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="M")
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, columns.date: time_index})
return df
@pytest.fixture
def long_df(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 345, 234, 100, 299]
country = ["first", "first", "first", "first", "second", "second", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, columns.date: time_index})
return df
@pytest.fixture
def long_df_2(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10]
country = ["first", "first", "second", "second", "third", "third"]
country_2 = ["first", "first", "second", "second", "third", "third"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, "item": country_2, columns.date: time_index})
return df
@pytest.fixture
def long_df_3(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
country = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_2 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_3 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M")).append(pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, "item": country_2, "store": country_3, columns.date: time_index})
return df
@pytest.fixture
def long_df_4(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
country = ["first", "first", "second", "second", "third", "third", "first", "first"]
country_2 = ["first", "first", "second", "second", "third", "third", "second", "first"]
country_3 = ["first", "first", "second", "second", "third", "third", "third", "fourth"]
time_index = pd.date_range("1-1-2020", periods=2, freq="M").append(pd.date_range("1-1-2020", periods=2, freq="M")).append(
pd.date_range("1-1-2020", periods=2, freq="M")).append(pd.date_range("1-1-2020", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, "item": country_2, "store": country_3, columns.date: time_index})
return df
@pytest.fixture
def long_df_numerical(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 345, 234, 100, 299]
country = [1, 1, 1, 1, 2, 2, 2, 2]
time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(
|
pd.date_range("1-1-1959", periods=4, freq="D")
|
pandas.date_range
|
import pandas as pd
import numpy as np
users_df = pd.read_csv("../../data/processed/users.csv", delimiter = ',')
posts_df = pd.read_csv("../../data/processed/posts.csv", delimiter = ',')
def get_accepted_answerer_reputation():
accepted_answerer_data = []
userId_list = users_df['Id']
for user in userId_list:
accepted_postid_list = posts_df[(posts_df.OwnerUserId == user) & (posts_df.PostTypeId == 1) &
(posts_df.AcceptedAnswerId.notnull())]['AcceptedAnswerId']
accepted_answerer_userIds = posts_df[posts_df.Id.isin(accepted_postid_list)]['OwnerUserId']
mean_rep = users_df[users_df.Id.isin(accepted_answerer_userIds)].Reputation.mean()
accepted_answerer_data.append({'userid': user, 'mean_reputation': mean_rep})
accepted_answerer_rep =
|
pd.DataFrame(accepted_answerer_data)
|
pandas.DataFrame
|
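get_accepted_answerer_reputation above chains three lookups: a user's questions that have accepted answers, the owners of those accepted answers, and the mean reputation of those owners. A self-contained toy version of that chain:

import pandas as pd

users = pd.DataFrame({"Id": [1, 2, 3], "Reputation": [100, 250, 400]})
posts = pd.DataFrame({
    "Id": [10, 11, 12],
    "OwnerUserId": [1, 2, 3],
    "PostTypeId": [1, 2, 2],            # post 10 is a question, 11 and 12 are answers
    "AcceptedAnswerId": [11, None, None],
})
accepted = posts[(posts.OwnerUserId == 1) & (posts.PostTypeId == 1)
                 & posts.AcceptedAnswerId.notnull()]["AcceptedAnswerId"]
answerers = posts[posts.Id.isin(accepted)]["OwnerUserId"]
print(users[users.Id.isin(answerers)].Reputation.mean())   # 250.0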
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
DatetimeIndex,
Series,
concat,
isna,
notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
pytest.param(
lambda x: np.isfinite(x).astype(float).sum(),
"count",
{},
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
pytest.param(
lambda x: np.isfinite(x).astype(float).sum(),
"count",
{},
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if roll_func != "sum":
result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)
result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)
tm.assert_almost_equal(result0, result1)
def test_nans_count():
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = obj.rolling(50, min_periods=30).count()
tm.assert_almost_equal(
result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()
)
@pytest.mark.parametrize(
"roll_func, kwargs",
[
["mean", {}],
["sum", {}],
["median", {}],
["min", {}],
["max", {}],
["std", {}],
["std", {"ddof": 0}],
["var", {}],
["var", {"ddof": 0}],
],
)
@pytest.mark.parametrize("minp", [0, 99, 100])
def test_min_periods(series, minp, roll_func, kwargs):
result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)(
**kwargs
)
expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)(
**kwargs
)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
def test_min_periods_count(series):
result = series.rolling(len(series) + 1, min_periods=0).count()
expected = series.rolling(len(series), min_periods=0).count()
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
@pytest.mark.parametrize(
"roll_func, kwargs, minp",
[
["mean", {}, 15],
["sum", {}, 15],
["count", {}, 0],
["median", {}, 15],
["min", {}, 15],
["max", {}, 15],
["std", {}, 15],
["std", {"ddof": 0}, 15],
["var", {}, 15],
["var", {"ddof": 0}, 15],
],
)
def test_center(roll_func, kwargs, minp):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(20, min_periods=minp, center=True), roll_func)(
**kwargs
)
expected = getattr(
concat([obj, Series([np.NaN] * 9)]).rolling(20, min_periods=minp), roll_func
)(**kwargs)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"roll_func, kwargs, minp, fill_value",
[
["mean", {}, 10, None],
["sum", {}, 10, None],
["count", {}, 0, 0],
["median", {}, 10, None],
["min", {}, 10, None],
["max", {}, 10, None],
["std", {}, 10, None],
["std", {"ddof": 0}, 10, None],
["var", {}, 10, None],
["var", {"ddof": 0}, 10, None],
],
)
def test_center_reindex_series(series, roll_func, kwargs, minp, fill_value):
# shifter index
s = [f"x{x:d}" for x in range(12)]
series_xp = (
getattr(
series.reindex(list(series.index) + s).rolling(window=25, min_periods=minp),
roll_func,
)(**kwargs)
.shift(-12)
.reindex(series.index)
)
series_rs = getattr(
series.rolling(window=25, min_periods=minp, center=True), roll_func
)(**kwargs)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
@pytest.mark.parametrize(
"roll_func, kwargs, minp, fill_value",
[
["mean", {}, 10, None],
["sum", {}, 10, None],
["count", {}, 0, 0],
["median", {}, 10, None],
["min", {}, 10, None],
["max", {}, 10, None],
["std", {}, 10, None],
["std", {"ddof": 0}, 10, None],
["var", {}, 10, None],
["var", {"ddof": 0}, 10, None],
],
)
def test_center_reindex_frame(frame, roll_func, kwargs, minp, fill_value):
# shifter index
s = [f"x{x:d}" for x in range(12)]
frame_xp = (
getattr(
frame.reindex(list(frame.index) + s).rolling(window=25, min_periods=minp),
roll_func,
)(**kwargs)
.shift(-12)
.reindex(frame.index)
)
frame_rs = getattr(
frame.rolling(window=25, min_periods=minp, center=True), roll_func
)(**kwargs)
if fill_value is not None:
frame_xp = frame_xp.fillna(fill_value)
tm.assert_frame_equal(frame_xp, frame_rs)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
@pytest.mark.parametrize(
"data_type",
[np.dtype(f"f{width}") for width in [4, 8]]
+ [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
)
def test_rolling_min_max_numeric_types(data_type):
# GH12373
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=0).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
|
tm.assert_series_equal(s_result, s_expected)
|
pandas._testing.assert_series_equal
|
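test_center in the suite above relies on an identity between a centered rolling aggregate and an end-anchored one computed on a NaN-padded, shifted copy of the series. A compact standalone check of that identity for the mean (assuming a reasonably recent pandas and numpy):

import numpy as np
import pandas as pd

obj = pd.Series(np.arange(50, dtype=float))
centered = obj.rolling(20, min_periods=15, center=True).mean()
padded = pd.concat([obj, pd.Series([np.nan] * 9)], ignore_index=True)
shifted = padded.rolling(20, min_periods=15).mean().iloc[9:].reset_index(drop=True)
pd.testing.assert_series_equal(centered, shifted)   # raises if the two disagree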
# -*- coding: utf-8 -*-
"""
Functions for preprocessing the data and crowd results
Authors: <NAME>, <NAME>
URL: https://github.com/adriapr/crowdairway
"""
import json, csv
import pandas as pd
import numpy as np
from matplotlib.patches import Ellipse
import math
from skimage import draw
from parse import search
import os.path
# Define some constants
image_width = 500
image_height = 500
path_raw = 'data'
path_processed = 'data_processed'
file_subject = os.path.join(path_raw, 'subjects.csv')
file_truth = os.path.join(path_processed, 'airways_ground_truth.csv')
file_task = os.path.join(path_processed, 'tasks.csv')
file_res = os.path.join(path_processed, 'results.csv')
file_annot = os.path.join(path_processed, 'annotations.csv')
file_task_class = os.path.join(path_raw, 'airways_classified.csv')
def get_df_processed():
""""Returns data frames with previously processed data"""
df_subject = pd.read_csv(file_subject)
df_truth=pd.read_csv(file_truth)
df_task = pd.read_csv(file_task)
df_res = pd.read_csv(file_res)
df_annot =
|
pd.read_csv(file_annot)
|
pandas.read_csv
|
from statistics import mean, stdev, median
import pandas as pd
import numpy as np
class MacroIndicator(object):
"""
To build a macroeconomic indicator according to Ilmanem (2014).
"""
def __init__(self):
pass
def _historical_normalizer(self, df):
"""
Normalize the series iterating row by row.
Parameters
----------
df : Pandas Dataframe uni-column
Returns
-------
Series: with the normalized series
df: Pandas Dataframe with data as index and column of the normalized series
"""
df_aux = df.copy()
normalized_rows_aux = []
normalized_rows = []
for ind, row in df_aux.itertuples():
normalized_rows_aux.append(row)
if len(normalized_rows_aux) > 1:
try:
score = (normalized_rows_aux[-1] - mean(normalized_rows_aux)) / stdev(normalized_rows_aux)
normalized_rows.append(score)
                except:  # division by zero when the expanding stdev is 0
score = normalized_rows[-1]
normalized_rows.append(score)
normalized_df = df_aux.iloc[1:]
normalized_df[str(normalized_df.columns[0])].replace(
to_replace=normalized_df[str(normalized_df.columns[0])].values,
value=pd.Series(normalized_rows),
inplace=True)
normalized_df.rename(columns={str(normalized_df.columns[0]): 'Normalized ' + str(normalized_df.columns[0])},
inplace=True)
return pd.Series(normalized_rows), normalized_df
def _expanding_median(self, df):
"""
Calculates the historical median iterating row by row
Parameters
----------
df : Pandas Dataframe uni-column
Returns
-------
Series: with the historical median
df: Pandas Dataframe with data as index and column of the median series
"""
df_aux = df.copy()
list_median_rows_aux = []
list_median_rows = []
for ind, row in df_aux.itertuples():
list_median_rows_aux.append(row)
list_median_rows.append(median(list_median_rows_aux))
df_aux[str(df_aux.columns[0])].replace(to_replace=df_aux[str(df_aux.columns[0])].values,
value=pd.Series(list_median_rows),
inplace=True)
df_aux.rename(columns={str(df_aux.columns[0]): 'Median-' + str(df_aux.columns[0])}, inplace=True)
return pd.Series(list_median_rows), df_aux
def get_macro_indicator(self, dfs, name, median_type, window=3):
"""
Takes series of macroeconomic data and provides signals if the variable is in an up or down scenario by comparing
the average series with the median series.
Parameters
----------
dfs: List of macroeconomic data Pandas DataFrame
name: str pf the name of the macro indicator, e.g. 'growth'
median_type: str 'expanding' or 'rolling'
window: number of years that will be used in the rolling median
Returns
-------
df : Pandas DataFrame with dates as index and columns as: (1) normalized series of each macro
data, (2) the mean series, (3) the median series, and (4) the signal up (1) and down (0).
"""
normalized_df_list = []
for df in dfs:
normalized_df = self._historical_normalizer(df)[1]
normalized_df_list.append(normalized_df)
concat_df =
|
pd.concat(normalized_df_list, join='inner', axis=1)
|
pandas.concat
|
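_historical_normalizer above z-scores each observation against the expanding history up to and including that date, using the sample standard deviation (ddof=1, as statistics.stdev does) and dropping the first point. A vectorised pandas sketch of the same idea (note: unlike the loop above, it yields NaN instead of carrying the previous score when the expanding stdev is zero):

import pandas as pd

s = pd.Series([1.0, 2.0, 4.0, 3.0, 5.0], name="growth")
expanding_z = ((s - s.expanding().mean()) / s.expanding().std()).iloc[1:]
print(expanding_z)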
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 30 22:25:05 2020
@author: usama
"""
import numpy as np
import warnings
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
warnings.filterwarnings("ignore")
def plot(dates,data,l,countryName,analysisType,baseline=True,factor=1):
plt.figure(figsize=(11,6.875))
if baseline:
a=data['Cumulative_cases'].to_numpy()
z=[x - a[i - 1] for i, x in enumerate(a)][1:]
z=z/max(z)
z=z*factor
plt.plot(z,label='Difference of consecutive Cummulative Cases',c='r')
mrkr=['H','o','1','4','*','h','2','+','3','x']
total=len(l)
c=0
for i in l:
plt.plot(data[i], label=i,marker=mrkr[c%total],linestyle='')
c=c+1
xx=np.arange(0,len(dates),10)
xDates=dates[xx]
plt.xticks(xx,xDates,rotation=30)
plt.xlabel('Time',fontsize=14)
plt.title(countryName+' - '+analysisType,fontsize=18)
plt.legend()
plt.grid()
def dataScaling(df):
scaling=MinMaxScaler()
_scaled=scaling.fit_transform(df)
dfScaled =
|
pd.DataFrame(_scaled, columns=df.columns,index=df.index)
|
pandas.DataFrame
|
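dataScaling above rescales every column to [0, 1] and rebuilds a DataFrame so that the index and column labels survive the round trip through scikit-learn. A minimal demo of that pattern (scikit-learn assumed installed; the numbers are invented):

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

df = pd.DataFrame({"cases": [10, 20, 40], "deaths": [1, 3, 5]},
                  index=pd.date_range("2020-05-01", periods=3))
scaled = pd.DataFrame(MinMaxScaler().fit_transform(df), columns=df.columns, index=df.index)
print(scaled)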
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 22:19:04 2020
@author: Briggs
some useful links:
Average draft position (ADP) - 'https://fantasyfootballcalculator.com/api/v1/adp/standard?teams=8&year=2019'
Expert consensus Rankings - 'https://partners.fantasypros.com/api/v1/consensus-rankings.php?sport=NFL&year=2019&week=0&id=1054&position=ALL&type=ST&scoring=HALF&filters=1:2:3:4:5:7:8:9:285:699&export=json'
"""
import requests
import datetime
from bs4 import BeautifulSoup
import pandas as pd
import csv
import urllib.parse
from io import StringIO
slotnames = { 0:'QB', 1:'RB', 2:'WR', 3:'TE', 5:'DEF', 4:'KICKER'}
slotvalues = {}
for slotid in slotnames:
slotvalues[slotnames[slotid]]=slotid
class privateLeague():
'''create an instance of a league for the current year.
to change year use the setyear function'''
#teams = {}
def __init__(self, LID, UID,X,REALTIME,Season,MyTeamName):
self.league_id = LID
self.year = datetime.datetime.now().year # should change this from calendar year to be within the typical season timeframe
self.url = "https://www.rtsports.com/"
self.user_id = UID
self.parameters = {'LID': LID,
'UID': UID,
'X':X}
self.cookies = {'REALTIME':REALTIME}
self.scoreboard = [None] * 16
self.rosters = {}
self.teams = {}
self.rosterFormat = {}
self.leaguesettings = None
self.boxscore = None
self.season = Season
self.MyTeamName = MyTeamName
print('Getting the current Week')
self.setCurrentWeek()
print('Getting Rosters')
self.Rosters = self.getRosters(self.CurrentWeek)
self.Rankings = self.getROSECR()
self.MyRoster = self.Rosters[self.Rosters['ffl-team']==self.MyTeamName]
print('Getting Player Data')
self.Players = self.getPlayerData()
self.Players = pd.merge(self.Players,self.Rosters,how='outer',on='Player')
self.Players = pd.merge(self.Players,self.Rankings,how='outer',on='Player')
self.Players = pd.merge(self.Players,self.getWeeklyECR(),how='outer',on='Player')
self.Players.sort_values(by='rank_ecr',inplace=True)
print('Getting Rankings')
#%% SET VALUES
def setCurrentWeek(self):
data = requests.get(self.url +'football/lineup.php',
params=self.parameters,
cookies=self.cookies)
        if data is None:
print("Failed to get Current Week")
soup = BeautifulSoup(data.content, 'html.parser')
week = soup.find(class_='header-notes hidden-tn')
print(week.string)
currentweek = int(week.string[10:12])
self.CurrentWeek = currentweek
return
#%% GET RTS DATA
def getRosters(self,Week):
csvparams={'CID':0,'FWK':Week,'CSV':'YES'}
csvparams.update(self.parameters)
data = requests.get(self.url +'football/report-rosters.php',
params=csvparams,
cookies=self.cookies).text
data = pd.read_csv(StringIO(data))
data.columns = ['ffl-team','Player','Position','nfl-team','Roster Status','']
data.drop(columns=['','Position','nfl-team'], inplace=True)
data = data.set_index('Player')
data.index = data.index.str.replace(' II','')
return data
def getPlayerData(self):
players = pd.DataFrame()
for Position in slotnames:
csvparams={'CONF':0,'CSV':'YES','POS':Position, 'STATS':'FFL','TEAM':-1,'SEASON': self.season}
csvparams.update(self.parameters)
data = requests.get(self.url +'football/report-top-players.php',
params=csvparams,
cookies=self.cookies).text
data = pd.read_csv(StringIO(data))
data = data.iloc[:,0:10]
data.columns = ["Rank","Player","Position","nfl-team","Bye","injury","ffl-team","pts","avg","avg-3wk"]
data.drop(columns=['ffl-team'], inplace=True)
data = data.set_index('Player')
data.index = data.index.str.replace(' II','')
data.index = data.index.str.replace(' V','')
data.index = data.index.str.replace(' IV','')
data.index = data.index.str.replace(' Jr.','')
data.index = data.index.str.replace(' ',' ')
players = players.append(data)
return players
#%% FREE AGENT STUFF
#%% RANKINGS
def getROSECR(self):
'''
valid position codes:
"QB, RB, WR, TE, K, OP, FLX, DST, IDP, DL, LB, DB, TK, TQB, TRB, TWR, TTE, TOL, HC, P"}
Valid type codes:
ST, weekly, Draft Half PPR, ROS
ROS will give rest of season rankings
weekly will give just this week
I am not sure what the type is for?
id: 1054
Unused params
        'filters':'1:2:3:4:5:7:8:9:285:699',
not sure what the filters mean
Returns
-------
        rankings : pandas.DataFrame
            Rest-of-season expert consensus rankings, one row per ranked player.
'''
rankings = pd.DataFrame()
positions = ['QB', 'RB', 'WR', 'TE', 'K', 'DST']
for position in positions:
params = {'sport':'NFL','year':self.year,'week':0,
'position':position,'id':1054,'type':'ROS',"ranking_type_name":"ros",'scoring':'PPR',
'export':'json'}
url ='https://partners.fantasypros.com/api/v1/consensus-rankings.php' #?sport=NFL&year=2020&week=0&id=1054&position=ALL&type=ST&scoring=HALF&filters=1:2:3:4:5:7:8:9:285:699&export=json'
data = requests.get(url,params=params)
ecr = data.json()
positionrankings =
|
pd.DataFrame(ecr['players'])
|
pandas.DataFrame
|
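privateLeague.__init__ above assembles its player table by outer-merging the stats, roster and ranking frames on the player name and then sorting by the consensus rank. A toy, offline version of that merge chain (all names and numbers invented):

import pandas as pd

players = pd.DataFrame({"Player": ["A. Back", "B. Wide"], "pts": [120.5, 98.0]})
rosters = pd.DataFrame({"Player": ["A. Back"], "ffl-team": ["My Team"]})
rankings = pd.DataFrame({"Player": ["A. Back", "B. Wide"], "rank_ecr": [3, 17]})
combined = pd.merge(players, rosters, how="outer", on="Player")
combined = pd.merge(combined, rankings, how="outer", on="Player")
print(combined.sort_values(by="rank_ecr"))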
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
        s = Series(rng)
import time
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from dataset.biased_dataset import BiasedDataset
DATA_ADULT_TRAIN = './data/raw/adult.data.csv'
DATA_ADULT_TEST = './data/raw/adult.test.csv'
DATA_CRIME_FILENAME = './data/raw/crime.csv'
DATA_GERMAN_FILENAME = './data/raw/german.csv'
# ADULT DATASET
# Listing of attributes:
# target: >50K, <=50K.
# age: continuous.
# workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov,
# Without-pay, Never-worked.
# fnlwgt: continuous.
# education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th,
# 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# education-num: continuous.
# marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
# Married-spouse-absent, Married-AF-spouse.
# occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty,
# Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving,
# Priv-house-serv, Protective-serv, Armed-Forces.
# relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
# sex: Female, Male.
# capital-gain: continuous.
# capital-loss: continuous.
# hours-per-week: continuous.
# native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany,
# Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras,
# Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France,
# Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua,
# Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
def get_adult_data(sensitive_features, drop_columns=[], test_size=0.2, random_state=42):
"""
train_path: path to training data
test_path: path to test data
returns: tuple of training features, training labels, test features and test labels
"""
    train_df = pd.read_csv(DATA_ADULT_TRAIN, na_values='?')
"""Author: <NAME>
This contains the main Spomato class to be used to access the Spotify API and create new playlists based on the user's
defined criteria.
"""
import os
import pandas as pd
import spotipy
class Spomato():
"""Object used to access spotify API through spotipy and generate playlists.
    This can take a combination of a user's saved tracks, playlists, and/or an artist's songs to generate a playlist
    of a specified length. It was conceived to build Spotify playlists sized for the Tomato Timer method.
    This requires the user to provide an access token for the Spotify API. The API scopes used by this library are
playlist-read-private, playlist-modify-private, and user-library-read.
Parameters
----------
access_token : str
A valid Spotify Access token.
Attributes
----------
data : dictionary
Dictionary storing available data structures to create playlists.
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read
current_user_id : str
The string id of the user of the access token used to create the spotipy session.
"""
def __init__(self,
access_token=None):
"""Initialization function that sets access token and generates initial spotipy session.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
self.access_token = access_token
self.data = {}
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def update_token(self, access_token):
"""Updates the token and spotify session with the provided access_token. Generally used if your access token
has expired.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
# update the class access token and the spotipy session
self.access_token = access_token
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def _get_spotipy_session(self):
"""Internal Function to create a new spotify session.
Returns
-------
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
"""
return spotipy.Spotify(auth=self.access_token)
@staticmethod
def _parse_album(album_data, market='US'):
"""Parses the album data returned from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
album_data : dict
A dictionary of album data from Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the album data and parse the track data
series_list = []
album_tracks = album_data['tracks']['items']
for record in album_tracks:
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_user_playlist(data, market='US'):
"""Parses a user playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['tracks']['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
            song_df = pd.DataFrame(columns=['song_id', 'time'])
        return song_df
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 21:05:00 2020
@author: Starlitnightly
"""
import itertools
import numpy as np
import pandas as pd
def FindERG( data, depth=2):
'''
    Find endogenous reference genes.
    Parameters
    ----------
    data: pandas.DataFrame
        DataFrame of data points with each entry in the form: ['gene_id', 'sample1', ...]
    depth: int
        Accuracy of the endogenous reference gene search; must be at least 2.
        The larger the number, the fewer genes are screened out and the higher the accuracy.
    Returns
    -------
    result: list
        a list of endogenous reference genes
'''
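    # Illustrative usage (a sketch, not from the original module): `expr_df` is
    # assumed to be a DataFrame whose first column is gene_id and whose remaining
    # columns hold one expression value per sample.
    #
    #     stable_genes = FindERG(expr_df, depth=3)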
if depth==1:
        print('the depth must be at least 2')
return
if len(data.columns)<=2:
        print('the number of samples must be at least 2')
return
if depth>(len(data.columns)-1):
        print('depth must not exceed the number of samples')
return
time=0
result=[]#result
datana=pd.DataFrame()
data1=pd.DataFrame()
data2=pd.DataFrame()
for i in itertools.combinations(range(1,depth+1), 2):
time=time+1 #calculate circle times
data=data.dropna()
data=data.drop_duplicates(data.columns[0])
data.reset_index(drop=True, inplace=True)
last_std=pd.DataFrame()
length=len(data)//1000
remain=len(data)-1000*length
for k in range(1,length+1):
datana=data.iloc[1000*(k-1):1000*k,0:1]
data1=data.iloc[1000*(k-1):1000*k,i[0]:i[0]+1]
data2=data.iloc[1000*(k-1):1000*k,i[1]:i[1]+1]
l1=pd.DataFrame()
l2=pd.DataFrame()
for j in range(1000*(k-1),1000*k):
l1[datana.loc[j]]=data1
l2[datana.loc[j]]=data2
l1=l1.div(np.asarray(data.iloc[1000*(k-1):1000*k,i[0]]))
l2=l2.div(np.asarray(data.iloc[1000*(k-1):1000*k,i[1]]))
l=l1-l2
l_std=l.std(axis=0)
l_std=l_std.sort_values()[0:20]
if(k==1):
last_std=l_std
else:
                last_std = pd.concat([last_std, l_std])
import os
import cx_Oracle
import logging
import numpy as np
import pandas as pd
import re
import zipfile
from datetime import datetime
from glob import glob
from os.path import split, normpath, join, relpath, basename
from pathlib import Path
from piper.decorators import shape
from piper.text import _file_with_ext
from piper.text import _get_qual_file
from piper.verbs import clean_names
from piper.verbs import str_trim
from zipfile import ZipFile, ZIP_DEFLATED
from piper.xl import WorkBook
logger = logging.getLogger(__name__)
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
# duplicate_files() {{{1
def duplicate_files(source=None,
glob_pattern='*.*',
recurse=False,
filesize=1,
keep=False,
xl_file=None):
    ''' Select files that have the same file size.
    These files are assumed to be 'duplicates'.
Parameters
----------
source
source directory, default None
glob_pattern
filter extension suffix, default '*.*'
recurse
        default False; if True, recurse into the source directory provided
filesize
file size filter, default 1 (kb)
keep
        {‘first’, ‘last’, False}, default False
Determines which duplicates (if any) to mark.
first: Mark duplicates as True except for the first occurrence.
last: Mark duplicates as True except for the last occurrence.
False: Mark all duplicates as True.
xl_file
        default None; if given, write the results to an Excel workbook at xl_file
Returns
-------
pd.DataFrame
Examples
--------
.. code-block::
from piper.io import duplicate_files
source = '/home/mike/Documents'
duplicate_files(source,
glob_pattern='*.*',
recurse=True,
filesize=2000000,
keep=False).query("duplicate == True")
**References**
https://docs.python.org/3/library/pathlib.html
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.duplicated.html
'''
def func(f):
try:
size = os.stat(f.as_posix()).st_size
except OSError as e:
size = 0
data = {'parent': f.parents[0].as_posix(),
'name': f.name,
# 'stem': f.stem, 'suffix': f.suffix,
'size': size}
return data
file_data = list_files(source=source, glob_pattern = glob_pattern,
recurse=recurse, regex=None)
file_data = [func(x) for x in file_data]
    df = pd.DataFrame(file_data)
import pandas as pd
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(14,6))
alugueis = pd.read_csv('/home/laumzav/PycharmProjects/Python_study/AlCuOn/Pandas/aluguel_tratado.csv')
alugueis.boxplot(['Valor'])
corte = alugueis[alugueis['Valor'] <= 100000.00]
corte.boxplot(['Valor'])
q1 = alugueis['Valor'].quantile(.25)
q3 = alugueis['Valor'].quantile(.75)
iiq = q3 - q1
limite_inferior = q1 - 1.5 * iiq
limite_superior = q3 + 1.5 * iiq
selecao = (alugueis['Valor'] >= limite_inferior) & (alugueis['Valor'] <= limite_superior)
corte = alugueis[selecao]
corte.boxplot(['Valor'])
corte.hist(['Valor'])
grupo_tipo = alugueis.groupby('Tipo')
q1 = grupo_tipo['Valor'].quantile(.25)
q3 = grupo_tipo['Valor'].quantile(.75)
iiq = q3 - q1
limite_inferior = q1 - 1.5 * iiq
limite_superior = q3 + 1.5 * iiq
corte = pd.DataFrame()
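# A plausible continuation (illustrative sketch, not part of the original script):
# filter each 'Tipo' group by its own IQR limits and collect the rows that survive.
#
#     for tipo in grupo_tipo.groups.keys():
#         eh_tipo = alugueis['Tipo'] == tipo
#         dentro_limite = (alugueis['Valor'] >= limite_inferior[tipo]) & \
#                         (alugueis['Valor'] <= limite_superior[tipo])
#         corte = pd.concat([corte, alugueis[eh_tipo & dentro_limite]])
#     corte.boxplot(['Valor'], by='Tipo')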
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
        df = read_csv(StringIO(data))
import re
from abc import ABC, abstractmethod
import pandas as pd
import negation.modifiers as modifiers
import negation.risk_vars as risk_vars
from pprint import pprint
# pyreverse -o png -p classdiagram1 negation\risk.py
# %%
class Factory:
def __init__(self):
pass
_numeric_vars = ["age", "vef", "sbp", "bmi", "creatinine"]
_factorial_vars = ["nyha"]
_binary_vars = ["gender", "current smoker", "diabetes", "copd",
"heart failure", "beta blocker", "acei"]
def createVar(self, type, object):
# print("type:", type)
if type in self._numeric_vars:
return(risk_vars.NumVar(object))
elif type in self._factorial_vars:
return(risk_vars.FactVar(object))
elif type in self._binary_vars:
return(risk_vars.BinVar(object))
else:
raise Exception("Variable type not recognized:",
type, object)
_examination_mods = ["indication", "hypothetical"]
_negation_mods = [
"definite_negated_existence", "probable_negated_existence",
"probable_existence", "definite_existence", "ambivalent_existence",
"pseudoneg"]
_date_mods = ["date"]
_temporality_mods = ["historical", "future", "acute"]
def createMod(self, type, object):
if type in self._examination_mods:
return(modifiers.ExamMod(object))
elif type in self._negation_mods:
return(modifiers.NegMod(object))
elif type in self._date_mods:
return(modifiers.DateMod(object))
elif type in self._temporality_mods:
return(modifiers.TempMod(object))
else:
raise Exception("Modifier type not recognized:",
type, object)
class PatientVars:
_risk_vars = ["vef", "sbp", "nyha", "current smoker", "diabetes", "copd"]
# i = 0
# for var in risk_vars:
# risk_vars[i] = "['" + risk_vars[i] + "']"
# i += 1
def __init__(self):
# self.vef = []
# self.sbp = []
# self.nyha = []
# self.current_smoker = []
# self.diabetes = []
# self.copd = []
for var in self._risk_vars:
setattr(self, var, [])
dict = {}
for key in self._risk_vars:
dict[key] = []
self.dict = dict
def addFinding(self, object):
""" Adds RiskVar object to PatientVars dictionary based
on the category of the RiskVar object
"""
atr_list = getattr(self, object.cat)
atr_list.append(object)
setattr(self, object.cat, atr_list)
def process(self):
"""Processes all findings. Must be performed before querying results
"""
self._setFindingIndex()
# self._detMissingAtrs()
# self._detAbundantAtrs()
# self._conflictAtrs()
def loopOverFindings(self, method):
for var in self._risk_vars:
findings = getattr(self, var)
if findings:
for finding in findings:
func = getattr(finding, method)
pprint(func())
def loopOverMods(self, method):
for var in self._risk_vars:
findings = getattr(self, var)
if findings:
for finding in findings:
finding.loopOverMods(method)
def _setFindingIndex(self):
for var in self._risk_vars:
findings = getattr(self, var)
for index, finding in enumerate(findings):
setattr(finding, "index", index)
# def _detMissingAtrs(self):
# """Add all keys of missing attributes to list self.missing
# """
# self.missing = []
# self.present = []
# for key in self.dict:
# if not self.dict[key]:
# self.missing.append(key)
# else:
# self.present.append(key)
# def _detAbundantAtrs(self):
# """Compares for each attribute the findings if multiple findings
# """
# self.abundant = []
# for key in self.dict:
# atr = self.dict[key]
# if len(atr) > 1:
# self.abundant.append(key)
# def _conflictAtrs(self):
# """Keep track of all matching and conflicting findings per variable
# Stores them in a dictionary
# """
# # Initialize conflicts dictionary
# conflicts = {}
# for key in self.abundant:
# conflicts[key] = {"match":[], "conflict":[]}
# # Loop over variable objects per variable
# for key in self.abundant:
# for i in range(len(self.dict[key])):
# for j in range(i+1, len(self.dict[key])):
# match = self.dict[key][i] == self.dict[key][j]
# if match:
# conflicts[key]["match"].append((i,j))
# else:
# conflicts[key]["conflict"].append((i,j))
# self.conflicts = conflicts
# def _gatherResults(self):
# new_dict = {}
# for atr in self.dict:
# ls = []
# for finding in self.dict[atr]:
# if finding.type == "numeric":
# result = self._gatherNumResult(finding)
# elif finding.type == "factorial":
# result = self._gatherFactResult(finding)
# elif finding.type == "binary":
# result = self._gatherBinResult(finding)
# ls.append(result)
# new_dict.update({atr : ls})
# def _gatherNumResult(self, finding):
# values = finding.rec_values
# def getOverview(self):
# new_dict = {}
# for atr in self.dict:
# ls = []
# for finding in self.dict[atr]:
# data = finding.getOverview()
# ls.append(data)
# new_dict.update({atr : ls})
# return(new_dict)
# def getMods(self):
# new_dict = {}
# for atr in self.dict:
# findings = self.dict[atr]
# if findings:
# ls = []
# for finding in findings:
# mods = finding.mod
# if mods:
# for mod in mods:
# ls.append((mod.phrase, mod.type, mod.value))
# new_dict.update({atr : ls})
# return(new_dict)
def getSummary(self):
df = pd.DataFrame()
for var in self._risk_vars:
findings = getattr(self, var)
if findings:
for finding in findings:
df = df.append(finding.getSummary(), ignore_index=True, sort=False)
return(df)
def view(self):
dict = {}
for var in self._risk_vars:
findings = getattr(self, var)
sub_dict = {}
for index, finding in enumerate(findings):
# sub_dict.update({"mod" : finding.getModInfo()})
sub_dict.update({
index : finding.view()})
dict.update({var : sub_dict})
return(dict)
def getDataframe2(self):
df = pd.DataFrame()
for var in self._risk_vars:
findings = getattr(self, var)
# In case there are findings for current var:
if findings:
for finding in findings:
df = df.append(finding.getDataframe(), ignore_index=True, sort=False)
return(df)
def getDataframe(self):
        finding_df = pd.DataFrame()
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
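# with an all-False mask, where() replaces every element with `other`;
# numeric replacements are coerced back to datetime64[ns]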
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
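# Series.mask(cond, other) should behave like Series.where(~cond, other)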
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
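# replacing with strings forces the result to object dtype, while the
# unreplaced elements keep their integer values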
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
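# loc-assignment to a missing label on an empty Series should enlarge it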
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
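# updating a column selected from the frame should write through to the
# frame's underlying data rather than to a detached copy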
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
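# align both series on the join of their indexes; labels missing from one
# side should be filled with `fill` when given, otherwise left as NaN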
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
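# s2's flat index is named 'b', so aligning with the MultiIndexed s1
# joins on the 'b' level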
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index,
identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
# return a copy with the same index here
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_corner(self):
# this corner case used to break; it is fixed now
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after the reindex introduces NaN
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
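# method='nearest' picks the closest existing label for each target;
# tolerance bounds the allowed distance, leaving NaN beyond it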
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_int.dtype, np.float_)
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
self.assertEqual(reindexed_int.dtype, np.int_)
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_bool.dtype, np.object_)
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
self.assertEqual(reindexed_bool.dtype, np.bool_)
def test_reindex_bool_pad(self):
# leading labels have nothing to pad from and stay null
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
self.assertTrue(isnull(filled_bool[:5]).all())
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
self.assertTrue(issubclass(result.dtype.type, np.integer))
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
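# bits 0, 2, 3 and 5 are set, so rows 1 and 4 are null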
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
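# nan_as_null=True turns the NaNs into nulls (masked values);
# nan_as_null=False keeps them as floating-point NaNs with no nulls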
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# cudf.DataFrame.to_pandas() upcasts numerical columns to float and
# casts nan to 0 in non-float numerical columns, so normalize before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
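# assigning a bare array to a column is positional: the values adopt the
# frame's existing (non-default) index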
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
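# dtype matrix shared by several dtype-parametrized tests below
# (Arrow interop, transpose, head/tail, etc.)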
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
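# Building a Series from a stdlib array.array (created via a memoryview of the
# numpy data) should round-trip the original values.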
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
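# A generator passed as column data should be fully consumed into a column.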
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
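# Reduction tests: each lambda below is applied to matching pandas and cudf
# frames, including empty and all-NaN inputs, with the same skipna setting.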
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
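# Operator tests: the same operator-module functions are applied to pandas and
# cudf objects (frames and series) and the results compared.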
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name can vary between runs, which sometimes
    # makes enc_with_name_arr identical to enc_arr, and there is no reliable
    # way to force a fixed string hash. Use an integer name so hash() returns
    # a constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
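# Round-trip through Arrow when the index has no name; pandas and cudf should
# produce equal tables and frames.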
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast explicitly for pandas because a list of None would otherwise
    # produce `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
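# set_index is exercised below with scalar labels, label lists, pandas Index /
# MultiIndex objects, and mixed combinations, with drop/append/inplace variants.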
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
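# The reindex_0 .. reindex_10 tests below cover the labels/index/columns/axis
# keyword combinations accepted by DataFrame.reindex, compared against pandas.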
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that rows are reindexed when index=labels is passed
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that column names are reindexed when columns=labels is passed
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that both rows and column names are reindexed when
    # index=index_labels and columns=column_labels are passed
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that both rows and column names are reindexed when
    # index=index_labels and columns=column_labels are passed
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas added ignore_index to sort_index in 1.0; emulate it on the
    # expected result below to stay independent of the installed pandas version
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
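# Renaming all columns via the .columns setter should be reflected in order.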
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
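# shift/diff tests: results are compared against pandas across dtypes, periods,
# and empty inputs; int8 data is generated in a narrow range to keep values
# within dtype bounds.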
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        # round() must not alter the null positions of the original column
        assert np.array_equal(
            gdf[c].isnull().to_array(), result[c].isnull().to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for all(), so
    # replace it with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
                reason="pandas's failure here seems like a bug (in < 1.2) "
                "given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
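            # NB: the "squences" typo below intentionally mirrors the exact
            # message raised by cudf, so it is preserved rather than corrected.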
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                # pytest.xfail() only accepts a reason string, so the pandas
                # version check gates the call instead of being passed in.
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
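

# Illustrative sketch (helper name is hypothetical and not collected by
# pytest): the ``_constructor`` family mirrors pandas' internal protocol:
# ``_constructor`` rebuilds an object of the same dimensionality,
# ``_constructor_sliced`` builds the lower-dimensional result of slicing a
# DataFrame, and ``_constructor_expanddim`` builds the higher-dimensional
# counterpart of a Series.
def _constructor_protocol_demo():
    pdf = pd.DataFrame({"a": [1.0], "b": [2.0]})
    same_dim = pdf._constructor({"a": [1.0]})  # -> pandas DataFrame
    lower_dim = pdf._constructor_sliced([1.0], name="a")  # -> pandas Series
    return type(same_dim), type(lower_dim)

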
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
        # Special case: a RangeIndex is assumed to report zero memory usage
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
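    # Worked arithmetic for the expectation below (assumes this cudf build
    # stores the MultiIndex codes as 64-bit integers):
    #   source columns: 100 rows * (8B int64 + 8B float64) = 1600 bytes
    #   codes:          100 rows * (8B + 8B)               = 1600 bytes
    #   levels:         3 uniques * 8B for each of 2 levels =  48 bytes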
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values
    # after alignment, so typecast both sides to float64 before comparing
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
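

# Illustrative sketch (helper name is hypothetical and not collected by
# pytest): when the assigned Series' index does not fully cover the
# DataFrame's index, pandas aligns on the index, fills the gaps with NaN,
# and upcasts the column to float64; that is why the test above casts both
# frames to float64 before comparing.
def _setitem_alignment_demo():
    df = pd.DataFrame({"id": [0, 1, 2, 3, 4]})
    df["partial"] = pd.Series([10, 20, 30], index=[4, 5, 6])
    # Only index 4 overlaps, so rows 0-3 hold NaN and the dtype is float64.
    return df["partial"].dtype

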
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
            compare_error_message=error is not NotImplementedError,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
        expect, actual, check_index_type=len(data) > 0
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
        expect, actual, check_index_type=len(data) > 0
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
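

# Illustrative sketch (helper name is hypothetical and not collected by
# pytest): pandas cannot consume a device-resident cupy array, so the test
# above copies it to host with ``.get()``; wrapping the same device memory
# with ``cuda.as_cuda_array`` yields a view whose __cuda_array_interface__
# omits the optional ``descr`` field, which is the extra code path exercised.
def _device_array_to_dataframe_demo():
    d_arr = cupy.arange(8, dtype="float64").reshape(4, 2)
    pdf = pd.DataFrame(d_arr.get(), columns=["a", "b"])  # host copy for pandas
    gdf = cudf.DataFrame(cuda.as_cuda_array(d_arr), columns=["a", "b"])
    return pdf, gdf

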
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
|
pd.Series([], dtype="float64")
|
pandas.Series
|
"""
Usage:
    Specify:
        paths to the training and test sets;
        the screenshot dimensions;
        the CSV input path and the output paths.
    Outputs two CSV files (training and test) with labels and window positions; an example label row is sketched after the imports.
These CSV files will be used to generate TFRecord format files.
"""
import pandas as pd
import os
from collections import OrderedDict
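# Hedged sketch (not part of the original script): one example of the label row
# that the loop below builds for every screenshot. The file name and the box
# coordinates are assumed placeholder values, not real data.
def _example_label_row():
    return OrderedDict([
        ('filename', '42.png'),        # '<observation_id>.png'
        ('width', 1366), ('height', 768),
        ('class', 'Chrome'),
        ('xmin', 100), ('ymin', 50),   # top-left corner of the window
        ('xmax', 900), ('ymax', 700),  # bottom-right corner of the window
    ])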
# set paths to training and test sets
training_set_path = os.path.join('.', 'training_set')
test_set_path = os.path.join('.', 'test_set')
train_fnames = os.listdir(training_set_path)
test_fnames = os.listdir(test_set_path)
# screenshot size; this could be inferred, but since I took the screenshots locally I know the values
screen_width = 1366
screen_height = 768
# CSV file names
window_positions_path = 'windows_positions.csv'
training_labels_path = 'training_labels.csv' # output fname
test_labels_path = 'test_labels.csv' # output fname
# import window position data
windows_positions = pd.read_csv(
window_positions_path,
header=0)
# split to training and test labels
training_set_labels = []
test_set_labels = []
for window_ind, window_row in windows_positions.iterrows():
row = OrderedDict()
row['filename'] = str(window_row.observation_id) + '.png'
row['width'] = screen_width
row['height'] = screen_height
row['class'] = 'Chrome'
row['xmin'] = window_row.pos_x
row['ymin'] = window_row.pos_y
row['xmax'] = window_row.pos_x2
row['ymax'] = window_row.pos_y2
# test if this should go to training or testing CSV
if row['filename'] in train_fnames:
training_set_labels.append(row)
elif row['filename'] in test_fnames:
test_set_labels.append(row)
else:
        raise ValueError('Encountered unknown file')
# convert the list of OrderedDicts to a DataFrame
training_set_labels =
|
pd.DataFrame(training_set_labels)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""Functions for creating an oemof energy system.
---
@ <NAME> - <EMAIL>, 27.01.2020
"""
import os
import pandas as pd
import logging
def import_scenario(filepath):
"""Imports data from a spreadsheet scenario file.
    The Excel file has to contain the following sheets:
- energysystem
- buses
- transformers
- sinks
- sources
- storages
- powerlines
- time_series
----
Keyword arguments:
filepath : obj:'str'
-- path to excel scenario file
----
Returns:
nodes_data : obj:'dict'
        -- dictionary containing data from the Excel scenario file (a usage sketch follows this function)
----
@ <NAME> - <EMAIL>, 05.03.2020
"""
from oemof.tools import logger
# reads node data from Excel sheet
if not filepath or not os.path.isfile(filepath):
raise FileNotFoundError(
'Excel data file {} not found.'.format(filepath))
# creates nodes from excel sheet
xls = pd.ExcelFile(filepath)
nd = {'buses': xls.parse('buses'),
'transformers': xls.parse('transformers'),
'demand': xls.parse('sinks'),
'storages': xls.parse('storages'),
'links': xls.parse('links'),
'timeseries': xls.parse('time_series'),
'energysystem': xls.parse('energysystem'),
'sources': xls.parse('sources')
#'constraints': xls.parse('constraints')
}
# error message, if no nodes are provided
if not nd:
raise ValueError('No nodes data provided.')
# returns logging info
logger.define_logging()
logging.info('Spreadsheet scenario successfully imported.')
# returns nodes
return nd
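# Hedged usage sketch (not part of the original module): shows how the dictionary
# returned by import_scenario() could be inspected. 'scenario.xlsx' is an assumed
# placeholder path, not a file shipped with this project.
def _example_import_scenario(path='scenario.xlsx'):
    nodes = import_scenario(path)
    for sheet_name, frame in nodes.items():
        # every value is a pandas DataFrame parsed from one spreadsheet sheet
        print(sheet_name, frame.shape)
    return nodes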
def define_energy_system(nodes_data):
"""Creates an energy system.
Creates an energy system with the parameters defined in the given
.xlsx-file. The file has to contain a sheet called "energysystem",
which has to be structured as follows:
|start_date |end_date |temporal resolution|
|-------------------|-------------------|-------------------|
|YYYY-MM-DD hh:mm:ss|YYYY-MM-DD hh:mm:ss|h |
----
Keyword arguments:
nodes_data : obj:'dict'
-- dictionary containing data from excel scenario file
----
Returns:
esys : obj:'dict'
        -- oemof energy system (a sketch of the time-index construction follows this function)
----
@ <NAME> - <EMAIL>, 05.03.2020
"""
from oemof import solph
# Importing energysystem parameters from the scenario
ts = next(nodes_data['energysystem'].iterrows())[1]
temp_resolution = ts['temporal resolution']
start_date = ts['start date']
end_date = ts['end date']
# creates time index
datetime_index = pd.date_range(start_date, end_date, freq=temp_resolution)
# initialisation of the energy system
esys = solph.EnergySystem(timeindex=datetime_index)
# defines a time series
nodes_data['timeseries'].set_index('timestamp', inplace=True)
nodes_data['timeseries'].index = pd.to_datetime(
nodes_data['timeseries'].index)
# returns logging info
logging.info(
'Date time index successfully defined:\n start date: '
+ str(start_date)
+ ',\n end date: '
+ str(end_date)
+ ',\n temporal resolution: '
+ str(temp_resolution))
# returns oemof energy system as result of this function
return esys
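# Hedged sketch (not part of the original module): illustrates how the columns of
# the 'energysystem' sheet shown in the docstring above translate into the pandas
# date-time index built for the oemof energy system. The dates and the hourly
# resolution ('h') are assumed example values.
def _example_time_index():
    example = pd.DataFrame({'start date': ['2020-01-01 00:00:00'],
                            'end date': ['2020-01-07 23:00:00'],
                            'temporal resolution': ['h']})
    row = next(example.iterrows())[1]
    # one week at hourly resolution -> 168 timestamps
    return pd.date_range(row['start date'], row['end date'],
                         freq=row['temporal resolution'])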
def format_weather_dataset(filepath):
"""
The feedinlib can only read .csv data sets, so the weather data from
    the .xlsx scenario file has to be converted into a .csv data set
and saved
----
Keyword arguments:
filepath: obj:'str'
        -- path to the Excel scenario file
"""
# The feedinlib can only read .csv data sets, so the weather data
# from the .xlsx scenario file have to be converted into a
# .csv data set and saved
read_file =
|
pd.read_excel(filepath, sheet_name='weather data')
|
pandas.read_excel
|
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 =
|
pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
|
pandas.date_range
|
import pandas as pd
import util_functions as uf
import os
import numpy as np
def create_dockless_trips(cur):
    # This script creates the AWS table for the dockless trips data received May 18, 2018
cur.execute("""
DROP TABLE IF EXISTS dockless_trips;
CREATE TABLE dockless_trips(
duration_in_minute numeric,
EndLatitude numeric,
EndLongitude numeric,
end_time timestamp,
endutc timestamp,
EndWard numeric,
Distance numeric,
MilesMoved numeric,
Operator varchar(50),
StartLatitude numeric,
StartLongitude numeric,
start_time timestamp,
startutc timestamp,
StartWard numeric,
TripID varchar(50) ,
TripDistance numeric,
UserID varchar(50),
BikeID varchar(50),
UniqueTripID varchar(50) PRIMARY KEY,
OperatorClean varchar(50)
)
""")
def patch_user_id():
    # UserID is missing for ~15,000 LimeBike records in the most recent file provided by DDOT; patch with the older file
patch_df =
|
pd.read_sql("""select DISTINCT
userid::text as user_id_patch,
startutc,
endutc,
operatorclean,
startlatitude::text as start_lat,
startlongitude::text as start_lon,
endlatitude::text as end_lat,
endlongitude::text as end_lon
FROM dockless_trips_org
WHERE OperatorClean='lime';
""", con=conn)
|
pandas.read_sql
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import os
import time
import numpy as np
from numpy import random
import pandas as pd
import logging
import pdb
import tempfile
from sqlalchemy import create_engine
from sklearn.preprocessing import MinMaxScaler
from Config import Config
conf = Config()
log = logging.getLogger(__name__)
log.info('%s logger started.',__name__)
def load_data(instrument, train):
if train:
data_path = conf.TRAINING_DATA_PATH
csv_path = os.path.join(data_path, instrument + conf.csv_file)
if conf.num_of_rows_read > 0:
return pd.read_csv(csv_path, sep=';', nrows=conf.num_of_rows_read)
else:
return
|
pd.read_csv(csv_path, sep=';')
|
pandas.read_csv
|
#standard python libraries
import json
import atexit
import datetime
import os
import warnings
import math
import shutil
import joblib
#external libraries
from binance.client import Client
import numpy as np
import pandas as pd
import ta
from sklearn import preprocessing
import torch
#external methods
from utils import read_config, read_json
from hyperparameters import HyperParameters, CandlestickInterval, Derivation, Scaling, Balancing, Shuffle, ScalerType
class dbid():
"""
Description:
Class which can be used like a dictionary.
        This class is not thread-safe! Changes to the dictionary only get written to disk when the instance goes out of scope. (A usage sketch follows the class definition.)
Arguments:
-path (string): Path of the database
"""
def __init__(self, path):
self.path = f"{path}/dbid.json"
#load in the dbid
with open(self.path) as json_file:
self.dbid = json.load(json_file)
#register the dump at the end of lifetime
atexit.register(self.dump)
def __getitem__(self, key):
return self.dbid[key]
def __setitem__(self, key, item):
#change the dict in ram
self.dbid[key] = item
def dump(self):
#save changes to json file
with open(self.path, 'w') as fp:
json.dump(self.dbid, fp, indent=4)
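# Hedged usage sketch (not part of the original class): demonstrates the
# dictionary-like access and the write-on-exit behaviour described in the
# docstring. The path "./example_db" is an assumed placeholder and must already
# contain a dbid.json file for this to work.
def _example_dbid_usage(path="./example_db"):
    ids = dbid(path)                      # loads <path>/dbid.json into memory
    ids["candlestick_interval"] = "5m"    # change is held in RAM only
    value = ids["candlestick_interval"]   # read back like a normal dict
    ids.dump()                            # explicit write; atexit would also do this
    return value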
class DataBase():
"""
Description:
        This is the base Database class, which every other Database object builds upon.
Arguments:
-path[string]: Path of the Database
"""
def __init__(self, path):
#save the params
self.path = path
#check if the path exists and is a database
if not os.path.isdir(path):
raise Exception("The path you chose is not existing")
if not os.path.isfile(f"{path}/dbid.json"):
raise Exception("The path you chose is not a DataBase")
#setup dbid
self.dbid = dbid(path=self.path)
def __getitem__(self, index):
"""
Description:
            Method for accessing data of the database. The access is directly from the hard drive (slower but more memory-efficient)
Arguments:
-index[string, list]: Generally: [candlestick_interval, list of features]. To access the whole dataframe only specify the candlestick_interval you want e.g. db["5m"].
To access only one feature specify the datatype and the feature you want e.g. db["5m", "close"]
To access multiple features specify the datatype and a list of features you want e.g. db["5m", ["close", "open"]]
Return:
-data[pd.DataFrame]: Returns always a DataFrame in the shape (rows, number of specified features)
"""
#make sure that candlestick interval is of type CandlestickInterval
if type(index) == tuple:
if not isinstance(index[0], CandlestickInterval):
raise Exception(f"Make sure your candlestick interval is of type CandlestickInterval and not {type(index[0])}")
elif not isinstance(index, CandlestickInterval):
raise Exception(f"Make sure your candlestick interval is of type CandlestickInterval and not {type(index)}")
#set the path
if type(index) == tuple:
path = f"{self.path}/{index[0].value}"
elif isinstance(index, CandlestickInterval):
path = f"{self.path}/{index.value}"
else:
raise Exception("Your chosen index is not valid")
#check if path is available
if not os.path.isdir(path):
raise Exception("Your chosen kline-interval is not available")
#access whole dataframe of certain kline-interval
if isinstance(index, CandlestickInterval):
#load in the data and return
try:
data = pd.read_csv(filepath_or_buffer=f"{path}/{index.value}", index_col="index")
#convert the date columns
data["close_time"]= pd.to_datetime(data["close_time"])
data["open_time"]=
|
pd.to_datetime(data["open_time"])
|
pandas.to_datetime
|
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
def _test_ohlc_stop_exits(**kwargs):
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
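        # As the assertions below suggest, out_dict['stop_type'] appears to encode
        # -1 for "no stop hit", 0 for a plain stop loss, 1 for a trailing stop loss and
        # 2 for a take profit, while out_dict['stop_price'] records the price level at
        # which the corresponding exit was triggered.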
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.rate(group_by=group_by),
pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate')
)
def test_total_partitions(self):
assert mask['a'].vbt.signals.total_partitions() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(),
pd.Series([2, 2, 1], index=mask.columns, name='total_partitions')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total_partitions')
)
def test_partition_rate(self):
assert mask['a'].vbt.signals.partition_rate() == 1.0
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(),
pd.Series([1.0, 1.0, 1.0], index=mask.columns, name='partition_rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(group_by=group_by),
pd.Series([1.0, 1.0], index=['g1', 'g2'], name='partition_rate')
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'First Index',
'Last Index', 'Norm Avg Index [-1, 1]', 'Distance: Min',
'Distance: Max', 'Distance: Mean', 'Distance: Std', 'Total Partitions',
'Partition Rate [%]', 'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object')
pd.testing.assert_series_equal(
mask.vbt.signals.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
1.6666666666666667,
33.333333333333336,
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
1.6666666666666667,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False)),
pd.Series([
|
pd.Timestamp('2020-01-01 00:00:00')
|
pandas.Timestamp
|
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import pandas as pd
import pylife.mesh.meshsignal
def test_plain_mesh_3d():
df = pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0], 'a': [9.9]})
pd.testing.assert_frame_equal(df.plain_mesh.coordinates,
pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0]}))
def test_plain_mesh_2d():
df = pd.DataFrame({'x': [1.0], 'y': [2.0], 'b': [3.0], 'a': [9.9]})
pd.testing.assert_frame_equal(df.plain_mesh.coordinates,
pd.DataFrame({'x': [1.0], 'y': [2.0]}))
def test_plain_mesh_3d_dims():
df =
|
pd.DataFrame({'x': [1.0, 2.0], 'y': [2.0, 3.0], 'z': [3.0, 4.0], 'b': [3.0, 3.0]})
|
pandas.DataFrame
|
"""
functions used for EUV/CHD mapping of a full CR
"""
import time
import numpy as np
import datetime
import pandas as pd
from chmap.maps.util.map_manip import combine_cr_maps
import chmap.utilities.plotting.psi_plotting as Plotting
import software.ezseg.ezsegwrapper as ezsegwrapper
import chmap.utilities.datatypes.datatypes as datatypes
import chmap.database.db_funs as db_funcs
import chmap.data.corrections.lbcc.LBCC_theoretic_funcs as lbcc_funcs
import chmap.data.corrections.iit.IIT_pipeline_funcs as iit_funcs
#### STEP ONE: SELECT IMAGES ####
def query_datebase_cr(db_session, query_time_min=None, query_time_max=None, interest_date=None, center=None,
ref_inst=None, cr_rot=None):
    if query_time_min is not None and query_time_max is not None:
query_pd = db_funcs.query_euv_images(db_session=db_session, time_min=query_time_min, time_max=query_time_max)
elif cr_rot is not None:
query_pd = db_funcs.query_euv_images_rot(db_session, rot_min=cr_rot, rot_max=cr_rot + 1)
else:
ref_instrument = [ref_inst, ]
euv_images = db_funcs.query_euv_images(db_session, time_min=interest_date + datetime.timedelta(hours=1),
time_max=interest_date + datetime.timedelta(hours=1),
instrument=ref_instrument)
# get min and max carrington rotation
# TODO: really only want one CR_value
cr_rot = euv_images.cr_rot
if center:
query_pd = db_funcs.query_euv_images_rot(db_session, rot_min=cr_rot - 0.5, rot_max=cr_rot + 0.5)
else:
query_pd = db_funcs.query_euv_images_rot(db_session, rot_min=cr_rot, rot_max=cr_rot + 1)
return query_pd
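# Hedged usage sketch for the image-selection step above (illustrative only: the
# db_session object and the concrete dates/rotation number are assumptions, not part
# of this module, and the helper is never called here):
def _example_select_images(db_session):
    # query a full Carrington rotation directly by rotation number
    cr_images = query_datebase_cr(db_session, cr_rot=2054)
    # or query by an explicit time window instead
    window_images = query_datebase_cr(
        db_session,
        query_time_min=datetime.datetime(2011, 2, 1),
        query_time_max=datetime.datetime(2011, 3, 1))
    return cr_images, window_images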
#### STEP TWO: APPLY PRE-PROCESSING CORRECTIONS ####
def apply_ipp(db_session, hdf_data_dir, inst_list, row, methods_list, lbc_combo_query, iit_combo_query,
n_intensity_bins=200, R0=1.01):
start = time.time()
index = row[0]
image_row = row[1]
inst_ind = inst_list.index(image_row.instrument)
# apply LBC
los_image, lbcc_image, mu_indices, use_ind, theoretic_query = lbcc_funcs.apply_lbc(db_session, hdf_data_dir,
lbc_combo_query[inst_ind],
image_row=image_row,
n_intensity_bins=n_intensity_bins,
R0=R0)
# apply IIT
lbcc_image, iit_image, use_indices, alpha, x = iit_funcs.apply_iit(db_session, iit_combo_query[inst_ind],
lbcc_image, use_ind, los_image, R0=R0)
# add methods to dataframe
ipp_method = {'meth_name': ("LBCC", "IIT"), 'meth_description': ["LBCC Theoretic Fit Method", "IIT Fit Method"],
'var_name': ("LBCC", "IIT"), 'var_description': (" ", " ")}
methods_list[index] = methods_list[index].append(pd.DataFrame(data=ipp_method), sort=False)
end = time.time()
print("Image Pre-Processing Corrections (Limb-Brightening and Inter-Instrument Transformation) have been "
"applied to image", image_row.data_id, "in", end - start, "seconds.")
return los_image, iit_image, methods_list, use_indices
#### STEP THREE: CORONAL HOLE DETECTION ####
def chd(db_session, inst_list, los_image, iit_image, use_indices, iit_combo_query, thresh1=0.95, thresh2=1.35, nc=3,
iters=1000):
start = time.time()
# reference alpha, x for threshold
sta_ind = inst_list.index('EUVI-A')
ref_alpha, ref_x = db_funcs.query_var_val(db_session, meth_name='IIT', date_obs=los_image.info['date_string'],
inst_combo_query=iit_combo_query[sta_ind])
# define chd parameters
image_data = iit_image.iit_data
use_chd = use_indices.astype(int)
use_chd = np.where(use_chd == 1, use_chd, los_image.no_data_val)
nx = iit_image.x.size
ny = iit_image.y.size
    # calculate new threshold parameters based off the reference instrument queried above
t1 = thresh1 * ref_alpha + ref_x
t2 = thresh2 * ref_alpha + ref_x
# fortran chd algorithm
np.seterr(divide='ignore')
ezseg_output, iters_used = ezsegwrapper.ezseg(np.log10(image_data), use_chd, nx, ny, t1, t2, nc, iters)
chd_result = np.logical_and(ezseg_output == 0, use_chd == 1)
chd_result = chd_result.astype(int)
# create CHD image
chd_image = datatypes.create_chd_image(los_image, chd_result)
chd_image.get_coordinates()
end = time.time()
print("Coronal Hole Detection Algorithm has been applied to image", iit_image.data_id, "in", end - start,
"seconds.")
return chd_image
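# Hedged worked example of the threshold rescaling inside chd() (the fit values are
# illustrative assumptions, not values pulled from the database; the helper is not
# called anywhere in this module):
def _example_chd_thresholds(ref_alpha=1.02, ref_x=-0.01, thresh1=0.95, thresh2=1.35):
    # the nominal cutoffs are mapped into the reference instrument's intensity frame
    t1 = thresh1 * ref_alpha + ref_x  # e.g. 0.95 * 1.02 - 0.01 = 0.959
    t2 = thresh2 * ref_alpha + ref_x  # e.g. 1.35 * 1.02 - 0.01 = 1.367
    return t1, t2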
#### STEP FOUR: CONVERT TO MAP ####
def create_map(iit_image, chd_image, methods_list, row, map_x=None, map_y=None, R0=1.01):
start = time.time()
index = row[0]
image_row = row[1]
# EUV map
euv_map = iit_image.interp_to_map(R0=R0, map_x=map_x, map_y=map_y, image_num=image_row.data_id)
# CHD map
chd_map = chd_image.interp_to_map(R0=R0, map_x=map_x, map_y=map_y, image_num=image_row.data_id)
# record image and map info
euv_map.append_data_info(image_row)
chd_map.append_data_info(image_row)
# generate a record of the method and variable values used for interpolation
interp_method = {'meth_name': ("Im2Map_Lin_Interp_1",), 'meth_description':
["Use SciPy.RegularGridInterpolator() to linearly interpolate from an Image to a Map"] * 1,
'var_name': ("R0",), 'var_description': ("Solar radii",), 'var_val': (R0,)}
# add to the methods dataframe for this map
methods_list[index] = methods_list[index].append(pd.DataFrame(data=interp_method), sort=False)
# incorporate the methods dataframe into the map object
euv_map.append_method_info(methods_list[index])
chd_map.append_method_info(methods_list[index])
end = time.time()
print("Image number", iit_image.data_id, "has been interpolated to map(s) in", end - start, "seconds.")
return euv_map, chd_map
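# Hedged sketch of how steps two to four chain together for a single queried image row
# (all session, query and method-list objects are assumed to exist already; this shows
# the call order only and is not the production driver script):
def _example_single_image(db_session, hdf_data_dir, inst_list, row, methods_list,
                          lbc_combo_query, iit_combo_query):
    los_image, iit_image, methods_list, use_indices = apply_ipp(
        db_session, hdf_data_dir, inst_list, row, methods_list,
        lbc_combo_query, iit_combo_query)
    chd_image = chd(db_session, inst_list, los_image, iit_image, use_indices,
                    iit_combo_query)
    euv_map, chd_map = create_map(iit_image, chd_image, methods_list, row)
    return euv_map, chd_map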
#### STEP FIVE: CREATE COMBINED MAPS ####
def cr_map(euv_map, chd_map, euv_combined, chd_combined, data_info, map_info, mu_cutoff=0.0, mu_merge_cutoff=None,
del_mu=None):
start = time.time()
# create map lists
euv_maps = [euv_map, ]
chd_maps = [chd_map, ]
if euv_combined is not None:
euv_maps.append(euv_combined)
if chd_combined is not None:
chd_maps.append(chd_combined)
# determine number of images already in combined map
n_images = len(data_info)
# combine maps with minimum intensity merge
if del_mu is not None:
euv_combined, chd_combined = combine_cr_maps(n_images, euv_maps, chd_maps, del_mu=del_mu, mu_cutoff=mu_cutoff)
combined_method = {'meth_name': ("Min-Int-CR-Merge-del_mu", "Min-Int-CR-Merge-del_mu"), 'meth_description':
["Minimum intensity merge for CR Map: using del mu"] * 2,
'var_name': ("mu_cutoff", "del_mu"), 'var_description': ("lower mu cutoff value",
"max acceptable mu range"),
'var_val': (mu_cutoff, del_mu)}
else:
euv_combined, chd_combined = combine_cr_maps(n_images, euv_maps, chd_maps, mu_merge_cutoff=mu_merge_cutoff,
mu_cutoff=mu_cutoff)
combined_method = {'meth_name': ("Min-Int-CR-Merge-mu_merge", "Min-Int-CR-Merge-mu_merge"), 'meth_description':
["Minimum intensity merge for CR Map: based on Caplan et. al."] * 2,
'var_name': ("mu_cutoff", "mu_merge_cutoff"), 'var_description': ("lower mu cutoff value",
"mu cutoff value in areas of "
"overlap"),
'var_val': (mu_cutoff, mu_merge_cutoff)}
# chd combined method
chd_combined_method = {'meth_name': ("Prob-CR-CHD-Merge",), 'meth_description': ["Probability Merge for CR CHD Maps"]}
# append image and map info records
data_info.append(euv_map.data_info)
map_info.append(euv_map.map_info)
end = time.time()
print("Image number", euv_map.data_info.data_id[0], "has been added to the combined CR map in", end - start,
"seconds.")
return euv_combined, chd_combined, combined_method, chd_combined_method
#### STEP SIX: PLOT COMBINED MAP AND SAVE TO DATABASE ####
def save_maps(db_session, map_data_dir, euv_combined, chd_combined, data_info, map_info, methods_list,
combined_method, chd_combined_method):
start = time.time()
# generate a record of the method and variable values used for interpolation
euv_combined.append_method_info(methods_list)
euv_combined.append_method_info(
|
pd.DataFrame(data=combined_method)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
'''
Uncommonly used utility functions
'''
import os
import subprocess
import pandas as pd
from dramkit.gentools import cut_df_by_con_val
from dramkit.iotools import read_lines, load_csv, logger_show
def get_csv_colmaxmin(csv_path, col, skipna=True, return_data=True,
ascending=None, **kwargs):
    '''
    Get the maximum and minimum values of a given column in a csv file.

    Parameters
    ----------
    csv_path : str
        Path of the csv data file
    col : str
        Name of the target column
    skipna : bool
        Whether to skip NaNs when computing max and min
    return_data : bool
        If True, return the maximum, the minimum and the DataFrame;
        if False, no data is returned (it is replaced by None)
    ascending : None, bool
        Sort the returned data by `col`: None for no sorting,
        True for ascending, False for descending
    **kwargs :
        Arguments accepted by :func:`dramkit.iotools.load_csv`

    Returns
    -------
    col_max :
        Maximum value of `col`
    col_min :
        Minimum value of `col`
    data : None, pandas.DataFrame
        Returned data
    '''
data = load_csv(csv_path, **kwargs)
col_max = data[col].max(skipna=skipna)
col_min = data[col].min(skipna=skipna)
if return_data:
if ascending is not None:
data.sort_values(col, ascending=ascending, inplace=True)
return col_max, col_min, data
else:
return col_max, col_min, None
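# Hedged usage sketch for get_csv_colmaxmin (the file path and column name are
# illustrative assumptions; the helper is not called anywhere in this module):
def _example_get_csv_colmaxmin():
    # fetch the extremes of the 'close' column and get the data back sorted descending
    col_max, col_min, df = get_csv_colmaxmin('./data/daily.csv', 'close',
                                             ascending=False)
    return col_max, col_min, df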
def load_text_multi(fpath, sep=',', encoding=None, del_first_col=False,
del_last_col=False, del_first_line=False, to_pd=True,
keep_header=True, logger=None):
    '''
    Read a text file that may contain several tables stacked vertically, each with a
    different number of columns, and extract every table separately
    (CFFEX position-ranking data files are one such case).

    Parameters
    ----------
    fpath : str
        Path of the text file
    sep : str
        Field separator, defaults to `,`
    encoding : None, str
        File encoding; if None, utf-8 and gbk are tried in turn
    del_first_col : bool
        Whether to drop the first column, defaults to False
    del_last_col : bool
        Whether to drop the last column, defaults to False
    del_first_line : bool
        Whether to drop the first line, defaults to False

        .. note:: If del_first_line is True, the returned pandas.DataFrame has no column names
    to_pd : bool
        Whether to return pandas.DataFrame objects, defaults to True
    keep_header : bool
        When returning pandas.DataFrame, whether to use the first row as column names, defaults to True
    logger : logging.Logger, None
        Logger object

    :returns: `list` - list of parsed tables, each element being a pandas.DataFrame or a list
    '''
if not os.path.exists(fpath):
        logger_show('File does not exist, returning None: %s' % fpath, logger, 'warn')
return None
lines = read_lines(fpath, encoding=encoding, logger=logger)
data = []
lens = []
for line in lines:
line = str(line)
line = line.strip()
if line == '':
continue
line = line.split(sep)
if del_first_col:
line = line[1:]
if del_last_col:
line = line[:-1]
data.append(line)
lens.append(len(line))
tmp = pd.DataFrame({'len': lens})
tmp['idx'] = range(0, tmp.shape[0])
tmps = cut_df_by_con_val(tmp, 'len')
start_end_idxs = [(x['idx'].iloc[0], x['idx'].iloc[-1]) for x in tmps]
datas = [data[idx1:idx2+1] for idx1, idx2 in start_end_idxs]
def _get_final_data(data):
        '''Organize the output format of the parsed table.'''
if del_first_line:
data = data[1:]
if to_pd:
data = pd.DataFrame(data)
else:
if to_pd:
if keep_header:
cols = data[0]
data = pd.DataFrame(data[1:])
data.columns = cols
else:
data =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
#standard python libraries
import json
import atexit
import datetime
import os
import warnings
import math
import shutil
import joblib
#external libraries
from binance.client import Client
import numpy as np
import pandas as pd
import ta
from sklearn import preprocessing
import torch
#external methods
from utils import read_config, read_json
from hyperparameters import HyperParameters, CandlestickInterval, Derivation, Scaling, Balancing, Shuffle, ScalerType
class dbid():
"""
Description:
Class which can be used like a dictionary.
This Class is not threadsafe! The changes to the dictionary, only get written to disk when the instance goes out of scope!
Arguments:
-path (string): Path of the database
"""
def __init__(self, path):
self.path = f"{path}/dbid.json"
#load in the dbid
with open(self.path) as json_file:
self.dbid = json.load(json_file)
#register the dump at the end of lifetime
atexit.register(self.dump)
def __getitem__(self, key):
return self.dbid[key]
def __setitem__(self, key, item):
#change the dict in ram
self.dbid[key] = item
def dump(self):
#save changes to json file
with open(self.path, 'w') as fp:
json.dump(self.dbid, fp, indent=4)
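# Hedged usage sketch for dbid (the database path is an illustrative assumption; note
# that changes live in RAM until dump() runs, either explicitly or at interpreter exit):
def _example_dbid(path="./databases/example_db"):
    meta = dbid(path)                   # loads <path>/dbid.json into memory
    meta["last_update"] = "2021-01-01"  # change is held in RAM only ...
    meta.dump()                         # ... and written back to dbid.json here
    return meta.dbid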
class DataBase():
"""
Description:
This is the base Database class, on which every other Database Objects builds upon.
Arguments:
-path[string]: Path of the Database
"""
def __init__(self, path):
#save the params
self.path = path
#check if the path exists and is a database
if not os.path.isdir(path):
raise Exception("The path you chose is not existing")
if not os.path.isfile(f"{path}/dbid.json"):
raise Exception("The path you chose is not a DataBase")
#setup dbid
self.dbid = dbid(path=self.path)
def __getitem__(self, index):
"""
Description:
Method for accessing data of the database. The access is direct from the harddrive (slower but more memory efficient)
Arguments:
-index[string, list]: Generally: [candlestick_interval, list of features]. To access the whole dataframe only specify the candlestick_interval you want e.g. db["5m"].
To access only one feature specify the datatype and the feature you want e.g. db["5m", "close"]
To access multiple features specify the datatype and a list of features you want e.g. db["5m", ["close", "open"]]
Return:
-data[pd.DataFrame]: Returns always a DataFrame in the shape (rows, number of specified features)
"""
#make sure that candlestick interval is of type CandlestickInterval
if type(index) == tuple:
if not isinstance(index[0], CandlestickInterval):
raise Exception(f"Make sure your candlestick interval is of type CandlestickInterval and not {type(index[0])}")
elif not isinstance(index, CandlestickInterval):
raise Exception(f"Make sure your candlestick interval is of type CandlestickInterval and not {type(index)}")
#set the path
if type(index) == tuple:
path = f"{self.path}/{index[0].value}"
elif isinstance(index, CandlestickInterval):
path = f"{self.path}/{index.value}"
else:
raise Exception("Your chosen index is not valid")
#check if path is available
if not os.path.isdir(path):
raise Exception("Your chosen kline-interval is not available")
#access whole dataframe of certain kline-interval
if isinstance(index, CandlestickInterval):
#load in the data and return
try:
data = pd.read_csv(filepath_or_buffer=f"{path}/{index.value}", index_col="index")
#convert the date columns
data["close_time"]= pd.to_datetime(data["close_time"])
data["open_time"]= pd.to_datetime(data["open_time"])
return data
except:
raise Exception("Your chosen kline-interval is not available in this DataBase")
#access all the labels
elif type(index) == tuple and len(index) == 2 and isinstance(index[0], CandlestickInterval) and index[1] == "labels":
try:
#get all the label names
label_names = next(os.walk(f"{path}/labels"))[1]
#load in all the labels
labels = pd.DataFrame()
for label_name in label_names:
df = pd.read_csv(filepath_or_buffer=f"{path}/labels/{label_name}/labels.csv", header=None, index_col=0, names=["index", "labels"])
labels[label_name] = df["labels"]
return labels
except:
raise Exception("There are no labels in your database")
#access one label
elif type(index) == tuple and len(index) == 3 and isinstance(index[0], CandlestickInterval) and index[1] == "labels" and type(index[2]) == str:
try:
#load in the labels
labels = pd.read_csv(filepath_or_buffer=f"{path}/labels/{index[2]}/labels.csv", header=None, index_col=0, names=["index", index[2]])
return labels
except:
raise Exception("Your chosen label-type is not available")
#access a list of labels
elif type(index) == tuple and len(index) == 3 and isinstance(index[0], CandlestickInterval) and index[1] == "labels" and type(index[2]) == list:
try:
#load in the labels
labels = pd.DataFrame()
for label_name in index[2]:
df = pd.read_csv(filepath_or_buffer=f"{path}/labels/{label_name}/labels.csv", header=None, index_col=0, names=["index", label_name])
labels[label_name] = df[label_name]
return labels[index[2]]
except:
raise Exception("Your chosen label-type is not available")
#access one feature of a kline-interval
elif type(index) == tuple and len(index) == 2 and isinstance(index[0], CandlestickInterval) and type(index[1]) == str:
try:
data = pd.read_csv(filepath_or_buffer=f"{path}/{index[0].value}", usecols=[index[1]])
#convert the date columns
if "close_time" in data.columns:
data["close_time"]= pd.to_datetime(data["close_time"])
if "open_time" in data.columns:
data["open_time"]= pd.to_datetime(data["open_time"])
return data
except:
raise Exception("Your chosen feature is not available in this DataBase")
#access list of features of a kline-interval
elif type(index) == tuple and len(index) == 2 and isinstance(index[0], CandlestickInterval) and type(index[1]) == list:
try:
data = pd.read_csv(filepath_or_buffer=f"{path}/{index[0].value}", usecols=index[1])
#convert the date columns
if "close_time" in data.columns:
data["close_time"]= pd.to_datetime(data["close_time"])
if "open_time" in data.columns:
data["open_time"]=
|
pd.to_datetime(data["open_time"])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
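    # Hedged note on the helper above: it sketches a chunked, multithreaded read where
    # the first task parses the header via nrows and every later task skips ahead with
    # skiprows, reads a header-less chunk, has its columns realigned to the header and
    # is finally stitched together with pd.concat. For example, num_rows=100 and
    # num_tasks=4 yield start offsets 0, 25, 50 and 75 with 25 rows per task.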
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
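# e.g. if nv were 3, f(1, 'NA') would return ',NA,' -- the value sits in
# slot i of an nv-wide comma-separated row, with every other slot left empty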
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with
|
tm.assertRaisesRegexp(ValueError, 'skip_footer')
|
pandas.util.testing.assertRaisesRegexp
|
#import libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#load data
df =
|
pd.read_csv('data.csv')
|
pandas.read_csv
|
"""QSAR modelling Data Sources
This module supports representation of functional bioactivies
- Directly from ChEMBL API
- From a list of Canonical SMILES, along with the measured activity
"""
import chembl_webresource_client
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem
from abc import ABCMeta, abstractmethod
from .utils import print_progress_bar
from .dataset import QSARDataset
from .preprocessing import Preprocessing
from .cdk_utils import CDKUtils
class DataSource(metaclass=ABCMeta):
"""
Represent a data source
Chemical data might be obtained from different sources:
- ChEMBL API (class ChEMBLApiDataSource)
- A CSV file downloaded from ChEMBL (class ChEMBLCsvDataSource)
- A CSV file produced manually or exported from elsewhere containing two columns:
'canonical_smiles' and 'activity' (class CsvDataSource)
The first represents the Canonical SMILES of a compound while the second corresponds
to the activity of the compound.
DataSource parses the data and creates an object of the class modSAR.dataset.QSARDataset
using the function `build_qsar_dataset`.
"""
def __init__(self, target_id, smiles_column, compound_id_column, activity_column,
is_chembl_data, apply_filter, **kwargs):
"""
Generic DataSource constructor
Args:
target_id (str): name of the protein target common to all activities in the dataset
smiles_column (str): name of column that contains SMILES code for the compounds
compound_id_column (str): column in the DataFrame that identifies the compound
activity_column (str): column in the DataFrame that contains the measured activity
"""
missing_attributes = []
if ((smiles_column is None) or (smiles_column == '')):
missing_attributes.append('smiles_column')
if (activity_column is None):
missing_attributes.append('activity_column')
if (compound_id_column is None):
missing_attributes.append('compound_id_column')
if (is_chembl_data is None):
missing_attributes.append('is_chembl_data')
if (len(missing_attributes) > 0):
raise ValueError('DataSource does not have required attributes: %s' %
(', '.join(missing_attributes)))
self.target_id = target_id
self.smiles_column = smiles_column
self.compound_id_column = compound_id_column
self.activity_column = activity_column
self.is_chembl_data = is_chembl_data
self.apply_filter = apply_filter
for attr, val in kwargs.items():
setattr(self, attr, val)
self.bioactivities_df = self._get_bioactivities_df()
@abstractmethod
def _get_bioactivities_df(self):
pass
def build_qsar_dataset(self, type="cdk", calculate_similarity=True):
"""
Preprocess bioactivities and builds a QSARDataset object
"""
preprocess = Preprocessing(compound_id_column=self.compound_id_column,
activity_column=self.activity_column,
apply_chembl_filter=self.is_chembl_data,
remove_duplicated=True)
clean_df = preprocess.do(self.bioactivities_df)
if type == "cdk":
cdk_utils = CDKUtils()
descriptors_df = cdk_utils.calculate_descriptors(clean_df, self.smiles_column)
descriptors_df.index = clean_df.index
X = descriptors_df
elif "morgan" in type:
nBits = 1024
radius = int(type.replace("morgan", ""))
smiles = clean_df[self.smiles_column]
descriptors_df = pd.DataFrame(columns=['Bit_%04d' % x for x in range(nBits)],
index=clean_df.index, dtype=int)
for i in range(len(smiles)):
try:
molecule = AllChem.MolFromSmiles(smiles.iloc[i])
fingerprint = AllChem.GetMorganFingerprintAsBitVect(molecule, radius, nBits=nBits)
except Exception:
raise ValueError("Error parsing molecule %s" % (smiles.index[i]))
for j in range(nBits):
descriptors_df.iloc[i, j] = int(fingerprint.ToBitString()[j])
X = descriptors_df
else:
raise ValueError("Type of descriptors is not known: %s" % type)
y = clean_df[self.activity_column]
y.index = X.index
qsar_dataset = QSARDataset(name=self.target_id,
X=X,
y=y,
X_smiles=clean_df[self.smiles_column],
metadata=clean_df,
apply_filter=self.apply_filter,
calculate_similarity=calculate_similarity)
return qsar_dataset
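# Illustrative usage sketch (not part of the original module; the target id and
# descriptor type below are assumptions):
#
#     source = ChEMBLApiDataSource(target_id='CHEMBL202', standard_types=['IC50'])
#     dataset = source.build_qsar_dataset(type="morgan2", calculate_similarity=False)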
class ChEMBLApiDataSource(DataSource):
"""
Retrieve active compounds for a target using ChEMBL API
Example of use:
chembl_data_source = ChEMBLApiDataSource(target_id='CHEMBL202', standard_types=['IC50'])
chembl202_dataset = chembl_data_source.build_qsar_dataset()
"""
def __init__(self, target_id, standard_types,
smiles_column='canonical_smiles',
compound_id_column='parent_molecule_chembl_id',
activity_column='pchembl_value',
apply_filter=True):
"""
Stores a DataFrame containing the bioactivities listed on ChEMBL for specified target.
Bioactivities can later be converted to a QSARDataset using the function `build_qsar_dataset`
Args:
target_id (str): ChEMBL target id
standard_types (str or list): e.g.: IC50, Ki, etc.
"""
if type(standard_types) is str:
standard_types = [standard_types]
super(ChEMBLApiDataSource, self).__init__(target_id,
smiles_column=smiles_column,
compound_id_column=compound_id_column,
activity_column=activity_column,
is_chembl_data=True,
standard_types=standard_types,
apply_filter=apply_filter)
def _get_bioactivities_df(self):
activity = chembl_webresource_client.new_client.new_client.activity
result = activity.filter(target_chembl_id=self.target_id,
assay_type__iregex='(B|F)')
def get_compound_df(idx, compound_dict, standard_types):
"""Filter compounds by the given standard_types, returning a DataFrame"""
if standard_types is None or compound_dict['standard_type'] is None:
is_valid_std_type = False
else:
compound_std_type = compound_dict['standard_type'].lower()
is_valid_std_type = any([std_type.lower() in compound_std_type
for std_type in standard_types])
if is_valid_std_type:
# Drop unused columns
compound_dict.pop('activity_properties', None)
# Capture Ligand Efficiency
lig_efficiency = compound_dict.pop('ligand_efficiency', None)
compound_df =
|
pd.DataFrame(compound_dict, index=[0])
|
pandas.DataFrame
|
"""
Functions about validation.
"""
import re
import pytz
import datetime as dt
from typing import Optional, List, Union, TYPE_CHECKING
import pycountry
import numpy as np
import pandas as pd
from pandas import DataFrame
from . import constants as cs
from . import helpers as hp
if TYPE_CHECKING:
from .feed import Feed
TIME_PATTERN1 = re.compile(r"^\d\d:\d\d:\d\d$")
TIME_PATTERN2 = re.compile(r"^\d:\d\d:\d\d$")
DATE_FORMAT = "%Y%m%d"
TIMEZONES = set(pytz.all_timezones)
# ISO639-1 language codes, both lower and upper case
LANGS = set(
[lang.alpha_2 for lang in pycountry.languages if hasattr(lang, "alpha_2")]
)
LANGS |= set(x.upper() for x in LANGS)
CURRENCIES = set(
[c.alpha_3 for c in pycountry.currencies if hasattr(c, "alpha_3")]
)
URL_PATTERN = re.compile(
r"^(?:http)s?://" # http:// or https://
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
EMAIL_PATTERN = re.compile(r"[^@]+@[^@]+\.[^@]+")
COLOR_PATTERN = re.compile(r"(?:[0-9a-fA-F]{2}){3}$")
def valid_str(x: str) -> bool:
"""
Return ``True`` if ``x`` is a non-blank string;
otherwise return ``False``.
"""
if isinstance(x, str) and x.strip():
return True
else:
return False
def valid_time(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid H:MM:SS or HH:MM:SS time;
otherwise return ``False``.
"""
if isinstance(x, str) and (
re.match(TIME_PATTERN1, x) or re.match(TIME_PATTERN2, x)
):
return True
else:
return False
def valid_date(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid YYYYMMDD date;
otherwise return ``False``.
"""
try:
if x != dt.datetime.strptime(x, DATE_FORMAT).strftime(DATE_FORMAT):
raise ValueError
return True
except ValueError:
return False
def valid_timezone(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid human-readable timezone string,
e.g. 'Africa/Abidjan'; otherwise return ``False``.
"""
return x in TIMEZONES
def valid_lang(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid two-letter ISO 639 language
code, e.g. 'aa'; otherwise return ``False``.
"""
return x in LANGS
def valid_currency(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid three-letter ISO 4217 currency
code, e.g. 'AED'; otherwise return ``False``.
"""
return x in CURRENCIES
def valid_url(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid URL; otherwise return ``False``.
"""
if isinstance(x, str) and re.match(URL_PATTERN, x):
return True
else:
return False
def valid_email(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid email address; otherwise return
``False``.
"""
if isinstance(x, str) and re.match(EMAIL_PATTERN, x):
return True
else:
return False
def valid_color(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid hexadecimal color string without
the leading hash; otherwise return ``False``.
"""
if isinstance(x, str) and re.match(COLOR_PATTERN, x):
return True
else:
return False
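# Quick illustrative checks for the validators above (comments only, not part
# of the original module):
#
#     valid_time("7:05:00")   # True  (H:MM:SS form)
#     valid_date("20230230")  # False (not a real calendar date)
#     valid_color("1E90FF")   # True  (no leading hash)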
def check_for_required_columns(
problems: List, table: str, df: DataFrame
) -> List:
"""
Check that the given GTFS table has the required columns.
Parameters
----------
problems : list
A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by the GTFS
and append to the problems list one error for each column
missing.
"""
r = cs.GTFS_REF
req_columns = r.loc[
(r["table"] == table) & r["column_required"], "column"
].values
for col in req_columns:
if col not in df.columns:
problems.append(["error", f"Missing column {col}", table, []])
return problems
def check_for_invalid_columns(
problems: List, table: str, df: DataFrame
) -> List:
"""
Check for invalid columns in the given GTFS DataFrame.
Parameters
----------
problems : list
A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
GTFS and append to the problems list one warning for each extra
column.
"""
r = cs.GTFS_REF
valid_columns = r.loc[r["table"] == table, "column"].values
for col in df.columns:
if col not in valid_columns:
problems.append(
["warning", f"Unrecognized column {col}", table, []]
)
return problems
def check_table(
problems: List,
table: str,
df: DataFrame,
condition,
message: str,
type_: str = "error",
) -> List:
"""
Check the given GTFS table for the given problem condition.
Parameters
----------
problems : list
A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
condition : boolean expression
One involving ``df``, e.g. ``~df['route_id'].map(valid_str)``
message : string
Problem message, e.g. ``'Invalid route_id'``
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
Record the indices of ``df`` that satisfy the condition.
If the list of indices is nonempty, append to the
problems the item ``[type_, message, table, indices]``;
otherwise do not append anything.
"""
indices = df.loc[condition].index.tolist()
if indices:
problems.append([type_, message, table, indices])
return problems
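# Illustrative call with a hypothetical table (not part of the original module):
#
#     f = pd.DataFrame({"route_id": ["r1", " "]})
#     probs = check_table([], "routes", f, ~f["route_id"].map(valid_str),
#                         "Invalid route_id")
#     # probs == [["error", "Invalid route_id", "routes", [1]]]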
def check_column(
problems: List,
table: str,
df: DataFrame,
column: str,
checker,
message: Optional[str] = None,
type_: str = "error",
*,
column_required: bool = True,
) -> List:
"""
Check the given column of the given GTFS with the given problem
checker.
Parameters
----------
problems : list
A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
checker : boolean valued unary function
Returns ``True`` if and only if no problem is encountered
message : string (optional)
Problem message, e.g. 'Invalid route_id'.
Defaults to 'Invalid ``column``; maybe has extra space characters'
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
Apply the checker to the column entries and record the indices
of ``df`` where the checker returns ``False``.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
before applying the checker.
"""
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(checker)
if not message:
message = f"Invalid {column}; maybe has extra space characters"
problems = check_table(problems, table, f, cond, message, type_)
return problems
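# Illustrative call with a hypothetical table (not part of the original module):
#
#     f = pd.DataFrame({"agency_name": ["Metro", ""]})
#     probs = check_column([], "agency", f, "agency_name", valid_str)
#     # probs == [["error", "Invalid agency_name; maybe has extra space characters",
#     #            "agency", [1]]]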
def check_column_id(
problems: List,
table: str,
df: DataFrame,
column: str,
*,
column_required: bool = True,
) -> List:
"""
A specialization of :func:`check_column`.
Parameters
----------
problems : list
A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
Returns
-------
list
The ``problems`` list extended as follows.
Record the indices of ``df`` where the given column has
a duplicated entry or an invalid string.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
"""
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(valid_str)
problems = check_table(
problems,
table,
f,
cond,
f"Invalid {column}; maybe has extra space characters",
)
cond = f[column].duplicated()
problems = check_table(problems, table, f, cond, f"Repeated {column}")
return problems
def check_column_linked_id(
problems: List,
table: str,
df: DataFrame,
column: str,
target_df: DataFrame,
target_column: Optional[str] = None,
*,
column_required: bool = True,
) -> List:
"""
A modified version of :func:`check_column_id`.
Parameters
----------
problems : list
A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
target_df : DataFrame
A GTFS table
target_column : string
A column of ``target_df``; defaults to ``column``
Returns
-------
list
The ``problems`` list extended as follows.
Record indices of ``df`` where the following condition is
violated: ``column`` contains IDs that are valid strings and are
present in ``target_df`` under the ``target_column`` name.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
"""
if target_column is None:
target_column = column
f = df.copy()
if target_df is None:
g = pd.DataFrame()
g[target_column] = np.nan
else:
g = target_df.copy()
if target_column not in g.columns:
g[target_column] = np.nan
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
g = g.dropna(subset=[target_column])
cond = ~f[column].isin(g[target_column])
problems = check_table(problems, table, f, cond, f"Undefined {column}")
return problems
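# Illustrative call with hypothetical frames (not part of the original module):
#
#     trips = pd.DataFrame({"route_id": ["r1", "r9"]})
#     routes = pd.DataFrame({"route_id": ["r1", "r2"]})
#     probs = check_column_linked_id([], "trips", trips, "route_id", routes)
#     # probs == [["error", "Undefined route_id", "trips", [1]]]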
def format_problems(
problems: List, *, as_df: bool = False
) -> Union[List, DataFrame]:
"""
Format the given problems list as a DataFrame.
Parameters
----------
problems : list
A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
as_df : boolean
If ``True``, return the problems as a DataFrame rather than a list.
Returns
-------
list or DataFrame
Return ``problems`` if not ``as_df``; otherwise return a
DataFrame with the problems as rows and the columns
``['type', 'message', 'table', 'rows']``.
"""
if as_df:
problems = pd.DataFrame(
problems, columns=["type", "message", "table", "rows"]
).sort_values(["type", "table"])
return problems
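# Illustrative conversion (not part of the original module):
#
#     probs = [["warning", "Unrecognized column foo", "routes", []]]
#     format_problems(probs, as_df=True)
#     # -> DataFrame with columns ['type', 'message', 'table', 'rows']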
def check_agency(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Check that ``feed.agency`` follows the GTFS.
Return a list of problems of the form described in
:func:`check_table`;
the list will be empty if no problems are found.
"""
table = "agency"
problems = []
# Preliminary checks
if feed.agency is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.agency.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column_id(
problems, table, f, "agency_id", column_required=False
)
# Check agency_name
problems = check_column(problems, table, f, "agency_name", valid_str)
# Check agency_url
problems = check_column(problems, table, f, "agency_url", valid_url)
# Check agency_timezone
problems = check_column(
problems, table, f, "agency_timezone", valid_timezone
)
# Check agency_fare_url
problems = check_column(
problems, table, f, "agency_fare_url", valid_url, column_required=False
)
# Check agency_lang
problems = check_column(
problems, table, f, "agency_lang", valid_lang, column_required=False
)
# Check agency_phone
problems = check_column(
problems, table, f, "agency_phone", valid_str, column_required=False
)
# Check agency_email
problems = check_column(
problems, table, f, "agency_email", valid_email, column_required=False
)
return format_problems(problems, as_df=as_df)
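# Typical call (``feed`` is assumed to be an already-loaded Feed object):
#
#     check_agency(feed, as_df=True, include_warnings=True)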
def check_calendar(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar``.
"""
table = "calendar"
problems = []
# Preliminary checks
if feed.calendar is None:
return problems
f = feed.calendar.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column_id(problems, table, f, "service_id")
# Check weekday columns
v = lambda x: x in range(2)
for col in [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]:
problems = check_column(problems, table, f, col, v)
# Check start_date and end_date
for col in ["start_date", "end_date"]:
problems = check_column(problems, table, f, col, valid_date)
if include_warnings:
# Check if feed has expired
d = f["end_date"].max()
if feed.calendar_dates is not None and not feed.calendar_dates.empty:
table += "/calendar_dates"
d = max(d, feed.calendar_dates["date"].max())
if d < dt.datetime.today().strftime(DATE_FORMAT):
problems.append(["warning", "Feed expired", table, []])
return format_problems(problems, as_df=as_df)
def check_calendar_dates(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar_dates``.
"""
table = "calendar_dates"
problems = []
# Preliminary checks
if feed.calendar_dates is None:
return problems
f = feed.calendar_dates.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column(problems, table, f, "service_id", valid_str)
# Check date
problems = check_column(problems, table, f, "date", valid_date)
# No duplicate (service_id, date) pairs allowed
cond = f[["service_id", "date"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (service_id, date)"
)
# Check exception_type
v = lambda x: x in [1, 2]
problems = check_column(problems, table, f, "exception_type", v)
return format_problems(problems, as_df=as_df)
def check_fare_attributes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.fare_attributes``.
"""
table = "fare_attributes"
problems = []
# Preliminary checks
if feed.fare_attributes is None:
return problems
f = feed.fare_attributes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check fare_id
problems = check_column_id(problems, table, f, "fare_id")
# Check currency_type
problems = check_column(
problems, table, f, "currency_type", valid_currency
)
# Check payment_method
v = lambda x: x in range(2)
problems = check_column(problems, table, f, "payment_method", v)
# Check transfers
v = lambda x: pd.isna(x) or x in range(3)
problems = check_column(problems, table, f, "transfers", v)
# Check transfer_duration
v = lambda x: x >= 0
problems = check_column(
problems, table, f, "transfer_duration", v, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_fare_rules(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.fare_rules``.
"""
table = "fare_rules"
problems = []
# Preliminary checks
if feed.fare_rules is None:
return problems
f = feed.fare_rules.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check fare_id
problems = check_column_linked_id(
problems, table, f, "fare_id", feed.fare_attributes
)
# Check route_id
problems = check_column_linked_id(
problems, table, f, "route_id", feed.routes, column_required=False
)
# Check origin_id, destination_id, contains_id
for col in ["origin_id", "destination_id", "contains_id"]:
problems = check_column_linked_id(
problems,
table,
f,
col,
feed.stops,
"zone_id",
column_required=False,
)
return format_problems(problems, as_df=as_df)
def check_feed_info(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.feed_info``.
"""
table = "feed_info"
problems = []
# Preliminary checks
if feed.feed_info is None:
return problems
f = feed.feed_info.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check feed_publisher_name
problems = check_column(
problems, table, f, "feed_publisher_name", valid_str
)
# Check feed_publisher_url
problems = check_column(
problems, table, f, "feed_publisher_url", valid_url
)
# Check feed_lang
problems = check_column(problems, table, f, "feed_lang", valid_lang)
# Check feed_start_date and feed_end_date
cols = ["feed_start_date", "feed_end_date"]
for col in cols:
problems = check_column(
problems, table, f, col, valid_date, column_required=False
)
if set(cols) <= set(f.columns):
d1, d2 = f.loc[0, ["feed_start_date", "feed_end_date"]].values
if pd.notna(d1) and pd.notna(d2) and d1 > d2:
problems.append(
[
"error",
"feed_start_date later than feed_end_date",
table,
[0],
]
)
# Check feed_version
problems = check_column(
problems, table, f, "feed_version", valid_str, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_frequencies(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.frequencies``.
"""
table = "frequencies"
problems = []
# Preliminary checks
if feed.frequencies is None:
return problems
f = feed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check start_time and end_time
time_cols = ["start_time", "end_time"]
for col in time_cols:
problems = check_column(problems, table, f, col, valid_time)
for col in time_cols:
f[col] = f[col].map(hp.timestr_to_seconds)
# Start_time should be earlier than end_time
cond = f["start_time"] >= f["end_time"]
problems = check_table(
problems, table, f, cond, "start_time not earlier than end_time"
)
# Headway periods should not overlap
f = f.sort_values(["trip_id", "start_time"])
for __, group in f.groupby("trip_id"):
a = group["start_time"].values
b = group["end_time"].values
indices = np.flatnonzero(a[1:] < b[:-1]).tolist()
if indices:
problems.append(
[
"error",
"Headway periods for the same trip overlap",
table,
indices,
]
)
# Check headway_secs
v = lambda x: x >= 0
problems = check_column(problems, table, f, "headway_secs", v)
# Check exact_times
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "exact_times", v, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_routes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.routes``.
"""
table = "routes"
problems = []
# Preliminary checks
if feed.routes is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.routes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_id
problems = check_column_id(problems, table, f, "route_id")
# Check agency_id
if "agency_id" in f:
if feed.agency is None:
problems.append(
[
"error",
"agency_id column present in routes but agency table missing",
table,
[],
]
)
elif "agency_id" not in feed.agency.columns:
problems.append(
[
"error",
"agency_id column present in routes but not in agency",
table,
[],
]
)
else:
g = f.dropna(subset=["agency_id"])
cond = ~g["agency_id"].isin(feed.agency["agency_id"])
problems = check_table(
problems, table, g, cond, "Undefined agency_id"
)
# Check route_short_name and route_long_name
for column in ["route_short_name", "route_long_name"]:
problems = check_column(
problems, table, f, column, valid_str, column_required=False
)
cond = ~(f["route_short_name"].notna() | f["route_long_name"].notna())
problems = check_table(
problems,
table,
f,
cond,
"route_short_name and route_long_name both empty",
)
# Check route_type
v = lambda x: x in range(8)
problems = check_column(problems, table, f, "route_type", v)
# Check route_url
problems = check_column(
problems, table, f, "route_url", valid_url, column_required=False
)
# Check route_color and route_text_color
for col in ["route_color", "route_text_color"]:
problems = check_column(
problems, table, f, col, valid_color, column_required=False
)
if include_warnings:
# Check for duplicated (route_short_name, route_long_name) pairs
cond = f[["route_short_name", "route_long_name"]].duplicated()
problems = check_table(
problems,
table,
f,
cond,
"Repeated pair (route_short_name, route_long_name)",
"warning",
)
# Check for routes without trips
s = feed.trips["route_id"]
cond = ~f["route_id"].isin(s)
problems = check_table(
problems, table, f, cond, "Route has no trips", "warning"
)
return format_problems(problems, as_df=as_df)
def check_shapes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.shapes``.
"""
table = "shapes"
problems = []
# Preliminary checks
if feed.shapes is None:
return problems
f = feed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
f.sort_values(["shape_id", "shape_pt_sequence"], inplace=True)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = check_column(problems, table, f, "shape_id", valid_str)
# Check shape_pt_lon and shape_pt_lat
for column, bound in [("shape_pt_lon", 180), ("shape_pt_lat", 90)]:
v = lambda x: pd.notna(x) and -bound <= x <= bound
cond = ~f[column].map(v)
problems = check_table(
problems,
table,
f,
cond,
f"{column} out of bounds {[-bound, bound]}",
)
# Check for duplicated (shape_id, shape_pt_sequence) pairs
cond = f[["shape_id", "shape_pt_sequence"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (shape_id, shape_pt_sequence)"
)
# Check if shape_dist_traveled decreases on a trip
if "shape_dist_traveled" in f.columns:
g = f.dropna(subset=["shape_dist_traveled"])
indices = []
prev_sid = None
prev_index = None
prev_dist = -1
cols = ["shape_id", "shape_dist_traveled"]
for i, sid, dist in g[cols].itertuples():
if sid == prev_sid and dist < prev_dist:
indices.append(prev_index)
prev_sid = sid
prev_index = i
prev_dist = dist
if indices:
problems.append(
[
"error",
"shape_dist_traveled decreases on a trip",
table,
indices,
]
)
return format_problems(problems, as_df=as_df)
def check_stops(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stops``.
"""
table = "stops"
problems = []
# Preliminary checks
if feed.stops is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stops.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check stop_id
problems = check_column_id(problems, table, f, "stop_id")
# Check stop_code, stop_desc, zone_id, parent_station
for column in ["stop_code", "stop_desc", "zone_id", "parent_station"]:
problems = check_column(
problems, table, f, column, valid_str, column_required=False
)
# Check stop_name
problems = check_column(problems, table, f, "stop_name", valid_str)
# Check stop_lon and stop_lat
if "location_type" in f.columns:
requires_location = f.location_type.isin([0, 1, 2])
else:
requires_location = True
for column, bound in [("stop_lon", 180), ("stop_lat", 90)]:
v = lambda x: pd.notna(x) and -bound <= x <= bound
cond = requires_location & ~f[column].map(v)
problems = check_table(
problems,
table,
f,
cond,
f"{column} out of bounds {[-bound, bound]}",
)
# Check stop_url
problems = check_column(
problems, table, f, "stop_url", valid_url, column_required=False
)
# Check location_type
v = lambda x: x in range(5)
problems = check_column(
problems, table, f, "location_type", v, column_required=False
)
# Check stop_timezone
problems = check_column(
problems,
table,
f,
"stop_timezone",
valid_timezone,
column_required=False,
)
# Check wheelchair_boarding
v = lambda x: x in range(3)
problems = check_column(
problems, table, f, "wheelchair_boarding", v, column_required=False
)
# Check further location_type and parent_station
if "parent_station" in f.columns:
if "location_type" not in f.columns:
problems.append(
[
"error",
"parent_station column present but location_type column missing",
table,
[],
]
)
else:
# Parent stations must be well-defined
S = set(f.stop_id) | {np.nan}
v = lambda x: x in S
problems = check_column(
problems,
table,
f,
"parent_station",
v,
"A parent station must be well-defined",
column_required=False,
)
# Stations must have location type 1
station_ids = f.loc[f.parent_station.notna(), "parent_station"]
cond = f.stop_id.isin(station_ids) & (f.location_type != 1)
problems = check_table(
problems, table, f, cond, "A station must have location_type 1"
)
# Stations must not lie in stations
cond = (f.location_type == 1) & f.parent_station.notna()
problems = check_table(
problems,
table,
f,
cond,
"A station must not lie in another station",
)
# Entrances (type 2), generic nodes (type 3) and boarding areas (type 4)
# need to be part of a parent
cond = f.location_type.isin([2, 3, 4]) & f.parent_station.isna()
problems = check_table(
problems,
table,
f,
cond,
"Entrances, nodes, and boarding areas must be part of a parent station",
)
if include_warnings:
# Check for stops of location type 0 or NaN without stop times
ids = []
if feed.stop_times is not None:
ids = feed.stop_times.stop_id.unique()
cond = ~feed.stops.stop_id.isin(ids)
if "location_type" in feed.stops.columns:
cond &= f.location_type.isin([0, np.nan])
problems = check_table(
problems, table, f, cond, "Stop has no stop times", "warning"
)
return format_problems(problems, as_df=as_df)
def check_stop_times(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stop_times``.
"""
table = "stop_times"
problems = []
# Preliminary checks
if feed.stop_times is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stop_times.copy().sort_values(["trip_id", "stop_sequence"])
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check arrival_time and departure_time
v = lambda x: pd.isna(x) or valid_time(x)
for col in ["arrival_time", "departure_time"]:
problems = check_column(problems, table, f, col, v)
# Check that arrival and departure times exist for the first and last
# stop of each trip and for each timepoint.
# For feeds with many trips, iterating through the stop time rows is
# faster than using groupby.
if "timepoint" not in f.columns:
f["timepoint"] = np.nan # This will not mess up later timepoint check
indices = []
prev_tid = None
prev_index = None
prev_atime = 1
prev_dtime = 1
for i, tid, atime, dtime, tp in f[
["trip_id", "arrival_time", "departure_time", "timepoint"]
].itertuples():
if tid != prev_tid:
# Check last stop of previous trip
if pd.isna(prev_atime) or pd.isna(prev_dtime):
indices.append(prev_index)
# Check first stop of current trip
if pd.isna(atime) or pd.isna(dtime):
indices.append(i)
elif tp == 1 and (
|
pd.isna(atime)
|
pandas.isna
|
#%%
import sys
import os
#sys.path.append(os.getcwd() + '/connectome_tools/')
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
sys.path.append('/Users/mwinding/repos/maggot_models')
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
rm = pymaid.CatmaidInstance(url, token, name, password)
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
#mg = load_metagraph("Gad", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
#mg.calculate_degrees(inplace=True)
#adj = mg.adj # adjacency matrix from the "mg" object
adj_ad = pd.read_csv(f'data/adj/all-neurons_ad.csv', index_col = 0).rename(columns=int)
adj = adj_ad.values
clusters = pd.read_csv('cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv', index_col = 0, header = 0)
order = pd.read_csv('cascades/data/signal_flow_order_lvl7.csv').values
# make array from list of lists
order_delisted = []
for sublist in order:
order_delisted.append(sublist[0])
order = np.array(order_delisted)
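# (equivalent one-liner, assuming the order sits in the first csv column:
#  order = order[:, 0])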
#%%
# pull sensory annotations and then pull associated skids
order = ['ORN', 'AN sensories', 'MN sensories', 'photoreceptors', 'thermosensories', 'v\'td', 'A1 ascending noci', 'A1 ascending mechano', 'A1 ascending proprio', 'A1 ascending class II_III']
sens = [ct.Celltype(name, pymaid.get_skids_by_annotation(f'mw {name}')) for name in order]
input_skids_list = [x.get_skids() for x in sens]
input_skids = [val for sublist in input_skids_list for val in sublist]
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
#%%
# cascades from each sensory modality
p = 0.05
max_hops = 10
n_init = 100
simultaneous = True
adj=adj_ad
input_hit_hist_list = casc.Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, stop_skids=output_skids,
adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous)
# **** continue here when new clusters are available
#%%
# grouping cascade indices by cluster type
# level 7 clusters
lvl7 = clusters.groupby('lvl7_labels')
# cluster order and number of neurons per cluster
cluster_lvl7 = []
for key in lvl7.groups.keys():
cluster_lvl7.append([key, len(lvl7.groups[key])])
cluster_lvl7 = pd.DataFrame(cluster_lvl7, columns = ['key', 'num_cluster'])
# breaking signal cascades into cluster groups
input_hit_hist_lvl7 = []
for hit_hist in input_hit_hist_list:
sensory_clustered_hist = []
for key in lvl7.groups.keys():
skids = lvl7.groups[key]
indices = np.where([x in skids for x in mg.meta.index])[0]
cluster_hist = hit_hist[indices]
cluster_hist = pd.DataFrame(cluster_hist, index = indices)
sensory_clustered_hist.append(cluster_hist)
input_hit_hist_lvl7.append(sensory_clustered_hist)
# summed signal cascades per cluster group (hops remain intact)
summed_hist_lvl7 = []
for input_hit_hist in input_hit_hist_lvl7:
sensory_sum_hist = []
for i, cluster in enumerate(input_hit_hist):
sum_cluster = cluster.sum(axis = 0)/(len(cluster.index)) # normalize by number of neurons in cluster
sensory_sum_hist.append(sum_cluster)
sensory_sum_hist = pd.DataFrame(sensory_sum_hist) # column names will be hop number
sensory_sum_hist.index = cluster_lvl7.key # uses cluster name for index of each summed cluster row
summed_hist_lvl7.append(sensory_sum_hist)
# number of neurons per cluster group over threshold (hops remain intact)
threshold = 50
num_hist_lvl7 = []
for input_hit_hist in input_hit_hist_lvl7:
sensory_num_hist = []
for i, cluster in enumerate(input_hit_hist):
num_cluster = (cluster>threshold).sum(axis = 0)
sensory_num_hist.append(num_cluster)
sensory_num_hist = pd.DataFrame(sensory_num_hist) # column names will be hop number
sensory_num_hist.index = cluster_lvl7.key # uses cluster name for index of each summed cluster row
num_hist_lvl7.append(sensory_num_hist)
# %%
# plot signal of all sensories through clusters
# main figure
fig, axs = plt.subplots(
1, 1, figsize=(5, 5)
)
vmax = 300
ax = axs
sns.heatmap(sum(summed_hist_lvl7).loc[order, 0:7], ax = ax, vmax = vmax, rasterized=True, cbar_kws={'label': 'Visits from sensory signal'})
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xlabel('Hops from sensory neuron signal')
plt.savefig('cascades/cluster_plots/all_sensory_through_clusters_lvl7.pdf', format='pdf', bbox_inches='tight')
# plotting number of neurons downstream of each sensory modality (with threshold)
fig, axs = plt.subplots(
1, 1, figsize=(5, 5)
)
ax = axs
sns.heatmap(sum(num_hist_lvl7).loc[order, 0:7], ax = ax, rasterized=True, cbar_kws={'label': 'Number of Neurons Downstream'})
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xlabel('Hops from sensory neuron signal')
#%%
#
# checking where inputs, outputs, etc are located in these reordered clusters
# maybe supplemental figure
cluster_membership = []
for cluster_key in order:
cluster_temp = clusters[clusters.lvl7_labels==cluster_key]
cluster_dSEZ = sum(cluster_temp.dSEZ == True)
cluster_dVNC = sum(cluster_temp.dVNC == True)
cluster_RG = sum(cluster_temp.RG == True)
cluster_ORN = sum(cluster_temp.sens_subclass_ORN == True)
cluster_AN = sum(cluster_temp.sens_subclass_AN == True)
cluster_MN = sum(cluster_temp.sens_subclass_MN == True)
cluster_thermo = sum(cluster_temp.sens_subclass_thermo == True)
cluster_photo = sum((cluster_temp.sens_subclass_photoRh5 == True) | (cluster_temp.sens_subclass_photoRh6 == True))
cluster_A00c = sum(cluster_temp.A00c == True)
cluster_vtd = sum(cluster_temp.sens_subclass_vtd == True)
cluster_input = sum(cluster_temp.input == True)
cluster_output = sum(cluster_temp.output == True)
cluster_brain = sum(cluster_temp.brain_neurons == True)
cluster_all = len(cluster_temp.index)
cluster_membership.append(dict({'cluster_key': cluster_key,
'total_neurons': cluster_all, 'brain_neurons': cluster_brain/cluster_all,
'outputs': cluster_output/cluster_all, 'inputs': cluster_input/cluster_all,
'dVNC': cluster_dVNC/cluster_all, 'dSEZ': cluster_dSEZ/cluster_all,
'RG': cluster_RG/cluster_all,
'ORN': cluster_ORN/cluster_all, 'AN': cluster_AN/cluster_all,
'MN': cluster_MN/cluster_all, 'thermo': cluster_thermo/cluster_all,
'photo': cluster_photo/cluster_all, 'noci': cluster_A00c/cluster_all,
'vtd': cluster_vtd/cluster_all}))
cluster_membership =
|
pd.DataFrame(cluster_membership)
|
pandas.DataFrame
|
import os
import numpy as np
import tensorflow as tf
import gpflow
from GPcounts import branchingKernel
from GPcounts import NegativeBinomialLikelihood
from sklearn.cluster import KMeans
import scipy.stats as ss
from pathlib import Path
import pandas as pd
from gpflow.utilities import set_trainable
from tqdm import tqdm
from scipy.signal import savgol_filter
import random
import scipy as sp
from scipy import interpolate
from robustgp import ConditionalVariance
from pandas import DataFrame
from scipy.special import logsumexp
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# Get number of cores reserved by the batch system (NSLOTS is automatically set, or use 1 if not)
NUMCORES=int(os.getenv("NSLOTS",1))
# print("Using", NUMCORES, "core(s)" )
# Create session properties
config=tf.compat.v1.ConfigProto(inter_op_parallelism_threads=NUMCORES,intra_op_parallelism_threads=NUMCORES)
tf.compat.v1.Session.intra_op_parallelism_threads = NUMCORES
tf.compat.v1.Session.inter_op_parallelism_threads = NUMCORES
class Fit_GPcounts(object):
def __init__(self,X = None,Y= None,scale = None,sparse = False,M=0,nb_scaled=False,safe_mode = False):
self.safe_mode = safe_mode
self.folder_name = 'GPcounts_models/'
self.transform = True # to use log(count+1) transformation
self.sparse = sparse # use sparse or full inference
self.nb_scaled = nb_scaled
self.X = None # time points (cell,samples, spatial location)
self.M = M # number of inducing points
self.Z = None # inducing points
self.ConditionalVariance = False # set inducing points using conditional variance from robustGP method
self.Y = None # gene expression matrix
self.Y_copy = None #copy of gene expression matrix
self.D = None # number of genes
self.N = None # number of cells
self.scale = scale
self.genes_name = None
self.cells_name = None
self.Scale = None
self.kernel = None
self.bic = None
# statistical test information
self.lik_name = None # the selected likelihood name
self.models_number = None # Total number of models to fit for single gene for selected test
self.model_index = None # index the current model
self.hyper_parameters = {} # model parameters initialization
self.user_hyper_parameters = [None,None,None,None]# user model parameters initialization
self.model = None # GP model information
self.var = None # GP variance of posterior predictive
self.mean = None # GP mean of posterior predictive
self.fix = False # fix hyper-parameters
# save likelihood of hyper-parameters of dynamic model to initialize the constant model
self.lik_alpha = None
self.lik_km = None
self.optimize = True # optimize or load model
self.branching = None # DE kernel or RBF kernel
self.xp = -1000. # Put branching time much earlier than zero time
# single gene information
self.y = None
self.index = None
self.global_seed = 0
self.seed_value = 0 # initialize seed
self.count_fix = 0 # counter of the number of trials to resolve either local optima or failures due to numerical issues
# check the X and Y are not missing
if (X is None) or (Y is None):
print('TypeError: GPcounts() missing 2 required positional arguments: X and Y')
else:
self.set_X_Y(X,Y)
def set_X_Y(self,X,Y):
self.seed_value = 0
np.random.seed(self.seed_value)
if X.shape[0] == Y.shape[1]:
self.X = X
self.cells_name = list(map(str,list(X.index.values)))
self.X = X.values.astype(float)
if len(self.X.shape) > 1:
self.X = self.X.reshape([-1,self.X.shape[1]])
else:
self.X = self.X.reshape([-1, 1])
if self.sparse:
if self.M == 0:
self.M = int((5*(len(self.X)))/100) # number of inducing points is 5% of length of time points
self.ConditionalVariance = True
self.Y = Y
self.genes_name = self.Y.index.values.tolist() # gene expression name
self.Y = self.Y.values # gene expression matrix
'''
if self.lik_name == 'Gaussian':
self.Y = self.Y.values # gene expression matrix
else:
self.Y = self.Y.values.astype(int)
self.Y = self.Y.astype(float)
self.Y_copy = self.Y
'''
self.Y_copy = self.Y
self.D = Y.shape[0] # number of genes
self.N = Y.shape[1] # number of cells
else:
print('InvalidArgumentError: Dimension 0 in X shape must be equal to Dimension 1 in Y, but shapes are %d and %d.' %(X.shape[0],Y.shape[1]))
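# A minimal usage sketch (not part of the original source; the data values are made up,
# and the shapes follow set_X_Y's check that X has one row per cell and Y one column per cell):
#   X = pd.DataFrame({'times': [0., 1., 2., 3.]})            # 4 cells / time points
#   Y = pd.DataFrame([[0, 2, 5, 9], [1, 1, 0, 2]],
#                    index=['gene_a', 'gene_b'])             # 2 genes x 4 cells
#   gp = Fit_GPcounts(X, Y)
#   results = gp.One_sample_test(lik_name='Negative_binomial')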
def Infer_trajectory(self,lik_name= 'Negative_binomial',transform = True):
if transform == True:
self.Y = self.Y.astype(int)
self.Y = self.Y.astype(float)
self.Y_copy = self.Y
genes_index = range(self.D)
genes_results = self.run_test(lik_name,1,genes_index)
return genes_results
def One_sample_test(self,lik_name= 'Negative_binomial', transform = True):
if transform == True:
self.Y = self.Y.astype(int)
self.Y = self.Y.astype(float)
self.Y_copy = self.Y
genes_index = range(self.D)
genes_results = self.run_test(lik_name,2,genes_index)
return genes_results
def Model_selection_test(self,lik_name = 'Negative_binomial',kernel = None,transform = True):
if transform == True:
self.Y = self.Y.astype(int)
self.Y = self.Y.astype(float)
self.Y_copy = self.Y
# Run GP model for linear, periodic and rbf kernels and calculate BIC
ker_list = ['Linear','Periodic','RBF']
genes_index = range(self.D)
selection_results = pd.DataFrame()
selection_results['Gene'] = 0
selection_results['Dynamic_model_log_likelihood'] = 0
selection_results['Constant_model_log_likelihood'] = 0
selection_results['log_likelihood_ratio'] = 0
selection_results['p_value'] = 0
selection_results['q_value'] = 0
selection_results['log_likelihood_ratio'] = 0
selection_results['Model'] = 0
selection_results['BIC'] = 0
for word in ker_list:
self.kernel = word
results = self.run_test(lik_name,2,genes_index)
results['BIC'] = -2*results['Dynamic_model_log_likelihood'] + self.K*np.log(self.X.shape[0])
results['Gene'] = self.genes_name
results['Model'] = word
results['p_value'] = 1 - ss.chi2.cdf(2*results['log_likelihood_ratio'], df=1)
results['q_value']= self.qvalue(results['p_value'])
selection_results = selection_results.merge(results, how = 'outer')
# Model probability estimation based on BIC, following SpatialDE (identification of spatially variable genes): https://www.nature.com/articles/nmeth.4636
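# i.e. the code below approximates P(model_k | data) ∝ exp(-BIC_k), normalized over the
# three kernels per gene; working in log space with logsumexp keeps the normalization numerically stable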
tr = selection_results.groupby(['Gene','Model'])['BIC'].transform(min) == selection_results['BIC']
# select bic values for each kernel and gene
bic_values = -selection_results[tr].pivot_table(values='BIC', index='Gene', columns='Model')
restore_these_settings = np.geterr()
temp_settings = restore_these_settings.copy()
temp_settings["over"] = "ignore"
temp_settings["under"] = "ignore"
np.seterr(**temp_settings)
log_v = logsumexp(bic_values,1)
log_model_prob= (bic_values.T - log_v).T
model_prob = np.exp(log_model_prob).add_suffix('_probability')
tr = selection_results.groupby('Gene')['BIC'].transform(min) == selection_results['BIC']
selection_results_prob = selection_results[tr]
selection_results_prob = selection_results_prob.join(model_prob, on='Gene')
transfer_columns = ['p_value', 'q_value']
np.seterr(**restore_these_settings)
selection_results_prob = selection_results_prob.drop(columns=transfer_columns)\
.merge(selection_results,how = 'inner')
return selection_results_prob
def Two_samples_test(self,lik_name= 'Negative_binomial',transform = True):
if transform == True:
self.Y = self.Y.astype(int)
self.Y = self.Y.astype(float)
self.Y_copy = self.Y
genes_index = range(self.D)
genes_results = self.run_test(lik_name,3,genes_index)
return genes_results
def Infer_branching_location(self, cell_labels, bins_num=50, lik_name='Negative_binomial',
branching_point=-1000,transform = True):
if transform == True:
self.Y = self.Y.astype(int)
self.Y = self.Y.astype(float)
self.Y_copy = self.Y
cell_labels = np.array(cell_labels)
self.X = np.c_[self.X, cell_labels[:, None]]
self.branching = True
self.xp = branching_point
# return self.X
genes_index = range(self.D)
log_likelihood = self.run_test(lik_name, 1, genes_index, branching=True)
self.branching_kernel_var = self.model.kernel.kern.variance.numpy()
self.branching_kernel_ls = self.model.kernel.kern.lengthscales.numpy()
# return log_likelihood
return self.infer_branching(lik_name, bins_num)
def infer_branching(self, lik_name, bins_num):
testTimes = np.linspace(min(self.X[:, 0]), max(self.X[:, 0]), bins_num, endpoint=True)
ll = np.zeros(bins_num)
models = list()
genes_index = range(self.D)
self.fix = True
X = self.X
for i in range(0, bins_num):
del self.X
# gpflow.utilities.print_summary(self.model, fmt='notebook')
del self.model
self.xp = testTimes[i]
self.X = X.copy()
self.X[np.where(self.X[:, 0] <= testTimes[i]), 1] = 1
_ = self.run_test(lik_name, 1, genes_index, branching=True)
ll[i] = self.model.log_posterior_density().numpy()
models.append(self.model)
del self.model
# Find MAP model
log_ll = np.zeros(bins_num)
i = 0
for mm in models:
log_ll[i] = mm.log_posterior_density().numpy()
i = i + 1
p = self.CalculateBranchingEvidence({'loglik': log_ll}, testTimes)
ll = p['posteriorBranching']
# tmp = -500. - max(log_ll)
# for i in range(0, bins_num):
# ll[i] = np.exp(log_ll[i] + tmp)
# normalized_ll = ll / ll.sum(0)
iMAP = np.argmax(ll)
# MAP_model = models[iMAP]
self.model = models[iMAP]
# Prediction
Xnew = np.linspace(min(self.X[:,0]), max(self.X[:,0]), 100).reshape(-1)[:, None]
x1 = np.c_[Xnew, np.ones(len(Xnew))[:, None]]
x2 = np.c_[Xnew, (np.ones(len(Xnew)) * 2)[:, None]]
Xtest = np.concatenate((x1, x2))
Xtest[np.where(Xtest[:, 0] <= self.model.kernel.xp), 1] = 1
if self.lik_name == 'Gaussian':
mu, var = self.model.predict_y(Xtest)
else:
mu, var = self.samples_posterior_predictive_distribution(Xtest)
del models
self.branching = False
return {'geneName':self.genes_name,
'branching_probability':ll,
'branching_location':self.model.kernel.xp,
'mean': mu,
'variance':var,
'Xnew':Xnew,
'test_times':testTimes,
'MAP_model':self.model,
'loglik':log_ll,
'logBayesFactor':p['logBayesFactor'],
'likelihood':self.lik_name}
def CalculateBranchingEvidence(self, d, Bsearch):
"""
:param d: output dictionary from FitModel
:param Bsearch: candidate list of branching points
:return: posterior probability of branching at each point and log Bayes factor
of branching vs not branching
"""
# Calculate probability of branching at each point
# o = d['loglik'][:-1]
o = d['loglik']
pn = np.exp(o - np.max(o))
p = pn / pn.sum() # normalize
# Calculate log likelihood ratio by averaging out
o = d['loglik']
Nb = o.size - 1
if Nb != len(Bsearch) - 1:
raise NameError('Passed in wrong length of Bsearch: %g - should be %g' % (len(Bsearch), Nb))
obj = o[:-1]
illmax = np.argmax(obj)
llmax = obj[illmax]
lratiostable = llmax + np.log(1 + np.exp(obj[np.arange(obj.size) != illmax] - llmax).sum()) - o[-1] - np.log(Nb)
return {'posteriorBranching': p, 'logBayesFactor': lratiostable}
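# Equivalently: posteriorBranching_i ∝ exp(loglik_i - max(loglik)), and
# logBayesFactor = log((1/Nb) * sum_{i<Nb} exp(loglik_i)) - loglik[-1],
# i.e. the evidence averaged over the candidate branching points versus the last
# candidate (branching at the end of the range, which effectively means no branching)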
def calculate_FDR(self,genes_results):
genes_results['p_value'] = 1 - ss.chi2.cdf(2*genes_results['log_likelihood_ratio'], df=1)
genes_results['q_value']= self.qvalue(genes_results['p_value'])
return genes_results
'''
def set_inducing_points_locations(self,Z):
self.Z = Z
self.M = self.Z.shape[0]
'''
def kmean_algorithm_inducing_points(self,M = 0):
if M != 0:
self.M = M
self.ConditionalVariance = False
# set inducing points by K-mean cluster algorithm
kmeans = KMeans(n_clusters= self.M).fit(self.X)
self.Z = kmeans.cluster_centers_
self.Z = np.sort(self.Z,axis=None).reshape([self.M,1])
self.Z = self.Z.reshape([self.Z.shape[0],1])
# Run the selected test and get likelihoods for all genes
def run_test(self,lik_name,models_number,genes_index,branching = False):
genes_results = {}
genes_state = {}
self.Y = self.Y_copy
self.models_number = models_number
self.lik_name = lik_name
self.optimize = True
#column names for likelihood dataframe
if self.models_number == 1:
column_name = ['Dynamic_model_log_likelihood']
elif self.models_number == 2:
column_name = ['Dynamic_model_log_likelihood','Constant_model_log_likelihood','log_likelihood_ratio']
else:
column_name = ['Shared_log_likelihood','model_1_log_likelihood','model_2_log_likelihood','log_likelihood_ratio']
for self.index in tqdm(genes_index):
self.y = self.Y[self.index].astype(float)
self.y = self.y.reshape([-1,1])
results = self.fit_single_gene(column_name)
genes_results[self.genes_name[self.index]] = results
return pd.DataFrame.from_dict(genes_results, orient='index', columns= column_name)
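# the returned DataFrame is indexed by gene name, with one column per entry of column_name
# (the fitted log likelihoods and, for the tests, the log likelihood ratio)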
# fit numbers of GPs = models_number to run the selected test
def fit_single_gene(self,column_name,reset =False):
if self.models_number == 1:
col_name = 0
else:
col_name = 2
self.model_index = 1
model_1_log_likelihood = self.fit_model()
results = [model_1_log_likelihood]
if self.models_number == 2:
if not(np.isnan(model_1_log_likelihood)):
if self.lik_name == 'Negative_binomial':
self.lik_alpha = self.model.likelihood.alpha.numpy()
if self.lik_name == 'Zero_inflated_negative_binomial':
self.lik_km = self.model.likelihood.km.numpy()
self.lik_alpha = self.model.likelihood.alpha.numpy()
self.model_index = 2
model_2_log_likelihood= self.fit_model()
if not(np.isnan(model_2_log_likelihood)):
ll_ratio = model_1_log_likelihood - model_2_log_likelihood
if np.isnan(model_1_log_likelihood) or np.isnan(model_2_log_likelihood):
model_2_log_likelihood = np.nan
ll_ratio = np.nan
results = [model_1_log_likelihood,model_2_log_likelihood,ll_ratio]
if self.models_number == 3:
X_df =
|
pd.DataFrame(data=self.X,index= self.cells_name,columns= ['times'])
|
pandas.DataFrame
|
"""Main class and helper functions.
"""
import os
from enum import Enum
from collections import OrderedDict
from functools import reduce
from pathlib import Path
from typing import Any, Union, Optional
from typing import Iterable, Sized, Sequence, Mapping, MutableMapping
from typing import Tuple, List, Dict, KeysView
from copy import deepcopy
import numpy as np
from numpy import ma
import pandas as pd
from numpy.lib.recfunctions import rec_drop_fields
from pandas.core.index import RangeIndex
from pandas.api.types import is_string_dtype, is_categorical
from scipy import sparse
from scipy.sparse import issparse
from scipy.sparse.sputils import IndexMixin
from natsort import natsorted
# try importing zarr
try:
from zarr.core import Array as ZarrArray
except ImportError:
class ZarrArray:
def __repr__(self):
return 'mock zarr.core.Array'
# try importing zappy
try:
from zappy.base import ZappyArray
except ImportError:
class ZappyArray:
def __repr__(self):
return 'mock zappy.base.ZappyArray'
from . import h5py
from .layers import AnnDataLayers
from . import utils
from .utils import Index, get_n_items_idx
from .logging import anndata_logger as logger
from .compat import PathLike
class StorageType(Enum):
Array = np.ndarray
Masked = ma.MaskedArray
Sparse = sparse.spmatrix
ZarrArry = ZarrArray
ZappyArry = ZappyArray
@classmethod
def classes(cls):
print(ZarrArray)
return tuple(c.value for c in cls.__members__.values())
class BoundRecArr(np.recarray):
"""A :class:`numpy.recarray` to which fields can be added using ``.['key']``.
To enable this, it is bound to an instance of AnnData.
"""
_attr_choices = ['obsm', 'varm']
def __new__(cls, input_array: np.ndarray, parent: Any, attr: str):
"""
Parameters
----------
input_array
A (structured) numpy array.
parent
Any object to which the BoundRecArr shall be bound.
attr
The name of the attribute as which it appears in parent.
"""
arr = np.asarray(input_array).view(cls)
arr._parent = parent
arr._attr = attr
return arr
def __array_finalize__(self, obj: Any):
if obj is None: return
self._parent = getattr(obj, '_parent', None)
self._attr = getattr(obj, '_attr', None)
def __reduce__(self) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
pickled_state = super().__reduce__()
new_state = pickled_state[2] + (self.__dict__, )
return pickled_state[0], pickled_state[1], new_state
def __setstate__(self, state: Sequence[Mapping[str, Any]]):
for k, v in state[-1].items():
self.__setattr__(k, v)
super().__setstate__(state[0:-1])
def copy(self, order='C') -> 'BoundRecArr':
new = super().copy()
new._parent = self._parent
return new
def flipped(self) -> 'BoundRecArr':
new_attr = (self._attr_choices[1] if self._attr == self._attr_choices[0]
else self._attr_choices[0])
return BoundRecArr(self, self._parent, new_attr)
def keys(self) -> Tuple[str, ...]:
return self.dtype.names
def __setitem__(self, key: str, arr: np.ndarray):
if not isinstance(arr, np.ndarray):
raise ValueError(
'Can only assign numpy ndarrays to .{}[{!r}], not objects of class {}'
.format(self._attr, key, type(arr))
)
if arr.ndim == 1:
raise ValueError('Use adata.obs or adata.var for 1-dimensional arrays.')
if self.shape[0] != arr.shape[0]:
raise ValueError(
'Can only assign an array of same length ({}), not of length {}.'
.format(self.shape[0], arr.shape[0])
)
# the following always allocates a new array
# even if the key already exists and dimensions match
# TODO: one could check for this case
# dtype
merged_dtype = []
found_key = False
for descr in self.dtype.descr:
if descr[0] == key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
found_key = True
else:
merged_dtype.append(descr)
if not found_key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
# create new array
new = np.empty(len(self), dtype=merged_dtype)
# fill the array
for name in new.dtype.names:
if name == key:
new[name] = arr
else:
new[name] = self[name]
# make it a BoundRecArr
# TODO: why can we not do this step before filling the array?
new = BoundRecArr(new, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def __delitem__(self, key: str):
"""Delete field with name."""
if key not in self.dtype.names:
raise ValueError(
'Currently, can only delete single names from {}.'
.format(self.dtype.names)
)
new_array = rec_drop_fields(self, key)
new = BoundRecArr(new_array, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def to_df(self) -> pd.DataFrame:
"""Convert to pandas dataframe."""
df = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
for key in self.keys():
value = self[key]
for icolumn, column in enumerate(value.T):
df['{}{}'.format(key, icolumn+1)] = column
return df
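# e.g. a 2-column field stored under a (hypothetical) key 'X_pca' becomes DataFrame columns 'X_pca1' and 'X_pca2'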
# for backwards compat
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None
# for backwards compat
def _gen_keys_from_multicol_key(key_multicol, n_keys):
"""Generates single-column keys from multicolumn key."""
keys = [('{}{:03}of{:03}')
.format(key_multicol, i+1, n_keys) for i in range(n_keys)]
return keys
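# e.g. _gen_keys_from_multicol_key('X_pca', 2) -> ['X_pca001of002', 'X_pca002of002']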
def df_to_records_fixed_width(df, var_len_str=True):
uns = {} # unstructured dictionary for storing categories
names = ['index']
if is_string_dtype(df.index):
if var_len_str:
index = df.index.values.astype(h5py.special_dtype(vlen=str))
else:
max_len_index = 0 if 0 in df.shape else df.index.map(len).max()
index = df.index.values.astype('S{}'.format(max_len_index))
else:
index = df.index.values
arrays = [index]
for k in df.columns:
names.append(k)
if is_string_dtype(df[k]) and not is_categorical(df[k]):
if var_len_str:
arrays.append(df[k].values.astype(h5py.special_dtype(vlen=str)))
else:
lengths = df[k].map(len)
if is_categorical(lengths): lengths = lengths.cat.as_ordered()
arrays.append(df[k].values.astype('S{}'.format(lengths.max())))
elif is_categorical(df[k]):
uns[k + '_categories'] = df[k].cat.categories
arrays.append(df[k].cat.codes)
else:
arrays.append(df[k].values)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}), uns
def _check_2d_shape(X):
"""Check shape of array or sparse matrix.
Ensure that X is always 2D: unlike numpy, we always deal with 2D arrays.
"""
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError('X needs to be 2-dimensional, not '
'{}-dimensional.'.format(len(X.shape)))
def _normalize_index(index, names):
if not isinstance(names, RangeIndex):
assert names.dtype != float and names.dtype != int, \
'Don’t call _normalize_index with non-categorical/string names'
# the following is insanely slow for sequences, we replaced it using pandas below
def name_idx(i):
if isinstance(i, str):
# `where` returns an 1-tuple (1D array) of found indices
i_found = np.where(names == i)[0]
if len(i_found) == 0: # returns array of length 0 if nothing is found
raise IndexError(
'Key "{}" is not valid observation/variable name/index.'
.format(i))
i = i_found[0]
return i
if isinstance(index, slice):
start = name_idx(index.start)
stop = name_idx(index.stop)
# string slices can only be inclusive, so +1 in that case
if isinstance(index.stop, str):
stop = None if stop is None else stop + 1
step = index.step
return slice(start, stop, step)
elif isinstance(index, (int, str)):
return name_idx(index)
elif isinstance(index, (Sequence, np.ndarray, pd.Index)):
# here, we replaced the implementation based on name_idx with this
# incredibly faster one
positions = pd.Series(index=names, data=range(len(names)))
positions = positions[index]
if positions.isnull().values.any():
raise KeyError(
'Indices "{}" contain invalid observation/variables names/indices.'
.format(index))
return positions.values
else:
raise IndexError('Unknown index {!r} of type {}'
.format(index, type(index)))
def _gen_dataframe(anno, length, index_names):
if isinstance(anno, pd.DataFrame):
return anno
if anno is None or len(anno) == 0:
_anno = pd.DataFrame(index=RangeIndex(0, length, name=None).astype(str))
else:
for index_name in index_names:
if index_name in anno:
_anno = pd.DataFrame(
anno, index=anno[index_name],
columns=[k for k in anno.keys() if k != index_name])
break
else:
_anno = pd.DataFrame(anno, index=RangeIndex(0, length, name=None).astype(str))
return _anno
class AnnDataFileManager:
"""Backing file manager for AnnData.
"""
def __init__(
self,
adata: 'AnnData',
filename: Optional[PathLike] = None,
filemode: Optional[str] = None,
):
self._adata = adata
self.filename = filename
self._filemode = filemode
self._file = None
if filename:
self.open()
def __repr__(self) -> str:
if self.filename is None:
return 'Backing file manager: no file is set.'
else:
return 'Backing file manager of file {}.'.format(self.filename)
def __getitem__(self, key: str) -> Union[h5py.Group, h5py.Dataset, h5py.SparseDataset]:
return self._file[key]
def __setitem__(self, key: str, value: Union[h5py.Group, h5py.Dataset, h5py.SparseDataset]):
self._file[key] = value
def __delitem__(self, key: str):
del self._file[key]
@property
def filename(self) -> Path:
return self._filename
@filename.setter
def filename(self, filename: Optional[PathLike]):
self._filename = None if filename is None else Path(filename)
def open(
self,
filename: Optional[PathLike] = None,
filemode: Optional[str] = None,
):
if filename is not None:
self.filename = filename
if filemode is not None:
self._filemode = filemode
if self.filename is None:
raise ValueError(
'Cannot open backing file if backing not initialized.')
self._file = h5py.File(self.filename, self._filemode, force_dense=True)
def close(self):
"""Close the backing file, remember filename, do *not* change to memory mode."""
if self._file is not None:
self._file.close()
def _to_memory_mode(self):
"""Close the backing file, forget filename, *do* change to memory mode."""
self._adata._X = self._adata.X[()]  # assign to _X directly ("__X" would be name-mangled to _AnnDataFileManager__X)
self._file.close()
self._file = None
self._filename = None
@property
def isopen(self) -> bool:
"""State of backing file."""
if self._file is None:
return False
# try accessing the id attribute to see if the file is open
return bool(self._file.id)
def _init_actual_AnnData(adata_view):
if adata_view.isbacked:
raise ValueError(
'You cannot modify elements of an AnnData view, '
'but need a copy of the subset.\n\n'
'Call `adata_subset = adata[index].copy(filename=...)`.')
adata_view._init_as_actual(adata_view.copy())
class _SetItemMixin:
def __setitem__(self, idx: Any, value: Any):
if self._view_args is None:
super().__setitem__(idx, value)
else:
adata_view, attr_name = self._view_args
_init_actual_AnnData(adata_view)
getattr(adata_view, attr_name)[idx] = value
class _ViewMixin(_SetItemMixin):
def __init__(self, *args, view_args: Tuple['AnnData', str] = None, **kwargs):
self._view_args = view_args
super().__init__(*args, **kwargs)
class ArrayView(_SetItemMixin, np.ndarray):
def __new__(
cls,
input_array: Sequence[Any],
view_args: Tuple['AnnData', str] = None,
):
arr = np.asarray(input_array).view(cls)
arr._view_args = view_args
return arr
def __array_finalize__(self, obj: Optional[np.ndarray]):
if obj is None: return
self._view_args = getattr(obj, '_view_args', None)
def keys(self) -> KeysView[str]:
# it's a structured array
return self.dtype.names
def copy(self, order: str = 'C') -> np.ndarray:
# we want a conventional array
return np.array(self)
def toarray(self) -> np.ndarray:
return self.copy()
class SparseCSRView(_ViewMixin, sparse.csr_matrix):
pass
class SparseCSCView(_ViewMixin, sparse.csc_matrix):
pass
class DictView(_ViewMixin, dict):
pass
class DataFrameView(_ViewMixin, pd.DataFrame):
_metadata = ['_view_args']
class Raw(IndexMixin):
def __init__(
self,
adata: Optional['AnnData'] = None,
X: Union[np.ndarray, sparse.spmatrix, None] = None,
var: Optional[BoundRecArr] = None,
varm: Optional[BoundRecArr] = None,
):
self._adata = adata
self._n_obs = adata.n_obs
if X is not None:
self._X = X
self._var = var
self._varm = varm
else:
self._X = None if adata.isbacked else adata.X.copy()
self._var = adata.var.copy()
self._varm = adata.varm.copy()
@property
def X(self):
if self._adata.isbacked:
if not self._adata.file.isopen: self._adata.file.open()
X = self._adata.file['raw.X']
if self._adata.isview: return X[self._adata._oidx, self._adata._vidx]
else: return X
else:
if self.n_obs == 1 and self.n_vars == 1:
return self._X[0, 0]
elif self.n_obs == 1 or self.n_vars == 1:
X = self._X
if issparse(self._X): X = self._X.toarray()
return X.flatten()
else:
return self._X
@property
def shape(self):
return self.X.shape
@property
def var(self):
return self._var
@property
def n_vars(self):
return self._var.shape[0]
@property
def n_obs(self):
return self._n_obs
@property
def varm(self):
return self._varm
@property
def var_names(self):
return self.var.index
def __getitem__(self, index):
oidx, vidx = self._normalize_indices(index)
if self._adata is None or not self._adata.isbacked: X = self._X[oidx, vidx]
else: X = self._adata.file['raw.X'][oidx, vidx]
if isinstance(vidx, (int, np.int64)): vidx = slice(vidx, vidx+1, 1)
var = self._var.iloc[vidx]
if self._varm is not None:
varm = self._varm[vidx]
else:
varm = None
return Raw(self._adata, X=X, var=var, varm=varm)
def copy(self):
return Raw(self._adata, X=self._X.copy(), var=self._var.copy(),
varm=None if self._varm is None else self._varm.copy())
def _normalize_indices(self, packed_index):
# deal with slicing with pd.Series
if isinstance(packed_index, pd.Series):
packed_index = packed_index.values
if isinstance(packed_index, tuple):
if len(packed_index) != 2:
raise IndexDimError(len(packed_index))
if isinstance(packed_index[1], pd.Series):
packed_index = packed_index[0], packed_index[1].values
if isinstance(packed_index[0], pd.Series):
packed_index = packed_index[0].values, packed_index[1]
obs, var = super()._unpack_index(packed_index)
obs = _normalize_index(obs, self._adata.obs_names)
var = _normalize_index(var, self.var_names)
return obs, var
INDEX_DIM_ERROR_MSG = 'You tried to slice an AnnData(View) object with an ' \
'{}-dimensional index, but only 2 dimensions exist in such an object.'
INDEX_DIM_ERROR_MSG_1D = '\nIf you tried to slice cells using adata[cells, ], ' \
'be aware that Python (unlike R) uses adata[cells, :] as slicing syntax.'
class IndexDimError(IndexError):
def __init__(self, n_dims):
msg = INDEX_DIM_ERROR_MSG.format(n_dims)
if n_dims == 1:
msg += INDEX_DIM_ERROR_MSG_1D
super().__init__(msg)
class AnnData(IndexMixin, metaclass=utils.DeprecationMixinMeta):
"""An annotated data matrix.
:class:`~anndata.AnnData` stores a data matrix :attr:`X` together with annotations
of observations :attr:`obs`, variables :attr:`var` and unstructured annotations :attr:`uns`.
.. figure:: https://falexwolf.de/img/scanpy/anndata.svg
:width: 350px
An :class:`~anndata.AnnData` object ``adata`` can be sliced like a pandas
dataframe, for instance, ``adata_subset = adata[:, list_of_variable_names]``.
:class:`~anndata.AnnData`'s basic structure is similar to R's ExpressionSet
[Huber15]_. If setting an ``.h5ad``-formatted HDF5 backing file ``.filename``,
data remains on the disk but is automatically loaded into memory if needed.
See this `blog post`_ for more details.
.. _blog post: http://falexwolf.de/blog/171223_AnnData_indexing_views_HDF5-backing/
Parameters
----------
X
A #observations × #variables data matrix. A view of the data is used if the
data type matches, otherwise, a copy is made.
obs
Key-indexed one-dimensional observations annotation of length #observations.
var
Key-indexed one-dimensional variables annotation of length #variables.
uns
Key-indexed unstructured annotation.
obsm
Key-indexed multi-dimensional observations annotation of length #observations.
If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype.
varm
Key-indexed multi-dimensional variables annotation of length #variables.
If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype.
dtype
Data type used for storage.
shape
Shape tuple (#observations, #variables). Can only be provided if ``X`` is ``None``.
filename
Name of backing file. See :class:`anndata.h5py.File`.
filemode
Open mode of backing file. See :class:`anndata.h5py.File`.
layers
Dictionary with keys as layers' names and values as matrices of the same dimensions as X.
See Also
--------
read_h5ad
read_csv
read_excel
read_hdf
read_loom
read_zarr
read_mtx
read_text
read_umi_tools
Notes
-----
Multi-dimensional annotations are stored in :attr:`obsm` and :attr:`varm`.
Indexing into an AnnData object with a numeric is supposed to be positional,
like pandas’ :attr:`~pandas.DataFrame.iloc` accessor, while indexing with a string/categorical is
supposed to behave like :attr:`~pandas.DataFrame.loc`.
If the unstructured annotations :attr:`uns` contain a sparse matrix of shape
:attr:`n_obs` × :attr:`n_obs`, these are sliced when calling ``[]``.
A data matrix is flattened if either :attr:`n_obs` or :attr:`n_vars` is 1, so that
numpy's slicing behavior is reproduced::
adata = AnnData(np.ones((2, 2)))
adata[:, 0].X == adata.X[:, 0]
:class:`~anndata.AnnData` stores observations (samples) of variables
(features) in the rows of a matrix. This is the convention of the modern
classics of statistics [Hastie09]_ and machine learning [Murphy12]_, the
convention of dataframes both in R and Python and the established statistics
and machine learning packages in Python (statsmodels_, scikit-learn_).
.. _statsmodels: http://www.statsmodels.org/stable/index.html
.. _scikit-learn: http://scikit-learn.org/
"""
_BACKED_ATTRS = ['X', 'raw.X']
# backwards compat
_H5_ALIASES = {
'X': {'X', '_X', 'data', '_data'},
'obs': {'obs', '_obs', 'smp', '_smp'},
'var': {'var', '_var'},
'uns': {'uns'},
'obsm': {'obsm', '_obsm', 'smpm', '_smpm'},
'varm': {'varm', '_varm'},
'layers': {'layers', '_layers'},
}
_H5_ALIASES_NAMES = {
'obs': {'obs_names', 'smp_names', 'row_names', 'index'},
'var': {'var_names', 'col_names', 'index'},
}
def __init__(
self,
X: Optional[Union[np.ndarray, sparse.spmatrix, pd.DataFrame]] = None,
obs: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None,
var: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None,
uns: Optional[Mapping[str, Any]] = None,
obsm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
varm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
layers: Optional[Mapping[str, Union[np.ndarray, sparse.spmatrix]]] = None,
raw: Optional[Raw] = None,
dtype: Union[np.dtype, str] = 'float32',
shape: Optional[Tuple[int, int]] = None,
filename: Optional[PathLike] = None,
filemode: Optional[str] = None,
asview: bool = False,
*, oidx: Index = None, vidx: Index = None):
if asview:
if not isinstance(X, AnnData):
raise ValueError('`X` has to be an AnnData object.')
self._init_as_view(X, oidx, vidx)
else:
self._init_as_actual(
X=X, obs=obs, var=var, uns=uns,
obsm=obsm, varm=varm, raw=raw,
layers=layers,
dtype=dtype, shape=shape,
filename=filename, filemode=filemode)
def _init_as_view(self, adata_ref: 'AnnData', oidx: Index, vidx: Index):
if adata_ref.isbacked and adata_ref.isview:
raise ValueError(
'Currently, you cannot index repeatedly into a backed AnnData, '
'that is, you cannot make a view of a view.')
self._isview = True
self._adata_ref = adata_ref
self._oidx = oidx
self._vidx = vidx
# the file is the same as of the reference object
self.file = adata_ref.file
# views on attributes of adata_ref
oidx_normalized, vidx_normalized = oidx, vidx
if isinstance(oidx, (int, np.int64)): oidx_normalized = slice(oidx, oidx+1, 1)
if isinstance(vidx, (int, np.int64)): vidx_normalized = slice(vidx, vidx+1, 1)
obs_sub = adata_ref.obs.iloc[oidx_normalized]
var_sub = adata_ref.var.iloc[vidx_normalized]
self._obsm = ArrayView(adata_ref.obsm[oidx_normalized], view_args=(self, 'obsm'))
self._varm = ArrayView(adata_ref.varm[vidx_normalized], view_args=(self, 'varm'))
# hackish solution here, no copy should be necessary
uns_new = deepcopy(self._adata_ref._uns)
# need to do the slicing before setting the updated self._n_obs, self._n_vars
self._n_obs = self._adata_ref.n_obs # use the original n_obs here
self._slice_uns_sparse_matrices_inplace(uns_new, self._oidx)
# fix _n_obs, _n_vars
if isinstance(oidx, slice):
self._n_obs = get_n_items_idx(obs_sub.index, adata_ref.n_obs)
elif isinstance(oidx, (int, np.int64)):
self._n_obs = 1
elif isinstance(oidx, Sized):
self._n_obs = get_n_items_idx(oidx, adata_ref.n_obs)
else:
raise KeyError('Unknown Index type')
if isinstance(vidx, slice):
self._n_vars = get_n_items_idx(var_sub.index, adata_ref.n_vars)
elif isinstance(vidx, (int, np.int64)):
self._n_vars = 1
elif isinstance(vidx, Sized):
self._n_vars = get_n_items_idx(vidx, adata_ref.n_vars)
else:
raise KeyError('Unknown Index type')
# fix categories
self._remove_unused_categories(adata_ref.obs, obs_sub, uns_new)
self._remove_unused_categories(adata_ref.var, var_sub, uns_new)
# set attributes
self._obs = DataFrameView(obs_sub, view_args=(self, 'obs'))
self._var = DataFrameView(var_sub, view_args=(self, 'var'))
self._uns = DictView(uns_new, view_args=(self, 'uns'))
# set data
if self.isbacked:
self._X = None
else:
self._init_X_as_view()
self._layers = AnnDataLayers(self, adata_ref=adata_ref, oidx=oidx, vidx=vidx)
# set raw, easy, as it's immutable anyways...
if adata_ref._raw is not None:
# slicing along variables axis is ignored
self._raw = adata_ref.raw[oidx]
else:
self._raw = None
def _init_X_as_view(self):
if self._adata_ref.X is None:
self._X = None
return
X = self._adata_ref._X[self._oidx, self._vidx]
if isinstance(X, sparse.csr_matrix):
self._X = SparseCSRView(X, view_args=(self, 'X'))
elif isinstance(X, sparse.csc_matrix):
self._X = SparseCSCView(X, view_args=(self, 'X'))
elif issparse(X):
raise ValueError('View on non-csr/csc sparse matrices not implemented.')
elif isinstance(X, ZappyArray): # ZappyArray acts as a view itself
self._X = X
else:
shape = (
get_n_items_idx(self._oidx, self._adata_ref.n_obs),
get_n_items_idx(self._vidx, self._adata_ref.n_vars)
)
if np.isscalar(X):
X = X.view()
self._X = ArrayView(X.reshape(shape), view_args=(self, 'X'))
def _init_as_actual(
self, X=None, obs=None, var=None, uns=None,
obsm=None, varm=None, raw=None, layers=None,
dtype='float32', shape=None,
filename=None, filemode=None):
from .readwrite.read import _read_args_from_h5ad
# view attributes
self._isview = False
self._adata_ref = None
self._oidx = None
self._vidx = None
# ----------------------------------------------------------------------
# various ways of initializing the data
# ----------------------------------------------------------------------
# init from file
if filename is not None:
if any((X, obs, var, uns, obsm, varm)):
raise ValueError(
'If initializing from `filename`, '
'no further arguments may be passed.')
self.file = AnnDataFileManager(self, filename, filemode)
X, obs, var, uns, obsm, varm, layers, raw = _read_args_from_h5ad(self, mode=filemode)
if X is not None:
# this is not a function that a user would use, hence it's fine to set the dtype
dtype = X.dtype.name
else:
self.file = AnnDataFileManager(self, None)
# init from AnnData
if isinstance(X, AnnData):
if any((obs, var, uns, obsm, varm)):
raise ValueError(
'If `X` is an AnnData object, no further arguments must be provided.')
X, obs, var, uns, obsm, varm, layers, raw = X._X, X.obs, X.var, X.uns, X.obsm, X.varm, X.layers, X.raw
# init from DataFrame
elif isinstance(X, pd.DataFrame):
obs = pd.DataFrame(index=X.index)
var = pd.DataFrame(index=X.columns)
X = X.values
# ----------------------------------------------------------------------
# actually process the data
# ----------------------------------------------------------------------
# check data type of X
if X is not None:
for s_type in StorageType:
if isinstance(X, s_type.value):
break
else:
class_names = ', '.join(c.__name__ for c in StorageType.classes())
raise ValueError('`X` needs to be of one of {}, not {}.'
.format(class_names, type(X)))
if shape is not None:
raise ValueError('`shape` needs to be `None` if `X` is not `None`.')
_check_2d_shape(X)
# if type doesn't match, a copy is made, otherwise, use a view
if issparse(X) or isinstance(X, ma.MaskedArray):
# TODO: maybe use view on data attribute of sparse matrix
# as in readwrite.read_10x_h5
if X.dtype != np.dtype(dtype): X = X.astype(dtype)
elif isinstance(X, ZarrArray):
X = X.astype(dtype)
else: # is np.ndarray
X = X.astype(dtype, copy=False)
# data matrix and shape
self._X = X
self._n_obs, self._n_vars = self._X.shape
else:
self._X = None
self._n_obs = len([] if obs is None else obs)
self._n_vars = len([] if var is None else var)
# check consistency with shape
if shape is not None:
if self._n_obs == 0:
self._n_obs = shape[0]
else:
if self._n_obs != shape[0]:
raise ValueError('`shape` is inconsistent with `obs`')
if self._n_vars == 0:
self._n_vars = shape[1]
else:
if self._n_vars != shape[1]:
raise ValueError('`shape` is inconsistent with `var`')
# annotations
self._obs = _gen_dataframe(obs, self._n_obs,
['obs_names', 'row_names', 'smp_names'])
self._var = _gen_dataframe(var, self._n_vars, ['var_names', 'col_names'])
# unstructured annotations
self._uns = uns or OrderedDict()
# multi-dimensional array annotations
if obsm is None:
try:
obsm = np.empty(self._n_obs, dtype=[])
except TypeError:
raise TypeError(
'TypeError: Empty data-type'
'--> try installing a more recent numpy version: '
' pip install numpy --upgrade')
if varm is None: varm = np.empty(self._n_vars, dtype=[])
# deal with dictionaries
if isinstance(obsm, Mapping):
obsm = utils.convert_dictionary_to_structured_array(obsm)
if isinstance(varm, Mapping):
varm = utils.convert_dictionary_to_structured_array(varm)
self._obsm = BoundRecArr(obsm, self, 'obsm')
self._varm = BoundRecArr(varm, self, 'varm')
self._check_dimensions()
self._check_uniqueness()
# raw
if raw is None:
self._raw = None
else:
if isinstance(raw, Raw):
self._raw = raw
else:
# is dictionary from reading the file, nothing that is meant for a user
shape = self.file['raw.X'].shape if self.isbacked else raw['X'].shape
self._raw = Raw(
self,
X=raw['X'],
var=_gen_dataframe(raw['var'], shape[1], ['var_names', 'col_names']),
varm=raw['varm'] if 'varm' in raw else None)
# clean up old formats
self._clean_up_old_format(uns)
# layers
self._layers = AnnDataLayers(self, layers, dtype)
def __sizeof__(self) -> int:
size = 0
for attr in ['_X', '_obs', '_var', '_uns', '_obsm', '_varm']:
s = getattr(self, attr).__sizeof__()
size += s
return size
def _gen_repr(self, n_obs, n_vars) -> str:
if self.isbacked:
backed_at = 'backed at \'{}\''.format(self.filename)
else:
backed_at = ''
descr = (
'AnnData object with n_obs × n_vars = {} × {} {}'
.format(n_obs, n_vars, backed_at))
for attr in ['obs', 'var', 'uns', 'obsm', 'varm', 'layers']:
keys = getattr(self, attr).keys()
if len(keys) > 0:
descr += '\n {}: {}'.format(attr, str(list(keys))[1:-1])
return descr
def __repr__(self) -> str:
if self.isview:
return 'View of ' + self._gen_repr(self.n_obs, self.n_vars)
else:
return self._gen_repr(self.n_obs, self.n_vars)
@property
def shape(self) -> Tuple[int, int]:
r"""Shape of data matrix (:attr:`n_obs`, :attr:`n_vars`)."""
return self.n_obs, self.n_vars
@property
def X(self) -> Optional[Union[np.ndarray, sparse.spmatrix, ArrayView]]:
"""Data matrix of shape :attr:`n_obs` × :attr:`n_vars`."""
if self.isbacked:
if not self.file.isopen: self.file.open()
X = self.file['X']
if self.isview:
X = X[self._oidx, self._vidx]
return X
else:
if self.n_obs == 1 and self.n_vars == 1:
return self._X[0, 0]
elif self.n_obs == 1 or self.n_vars == 1:
X = self._X
if issparse(self._X): X = self._X.toarray()
return X.flatten()
else:
return self._X
@X.setter
def X(self, value: Optional[Union[np.ndarray, sparse.spmatrix]]):
if value is None:
if self.isview:
raise ValueError('Copy the view before setting the data matrix to `None`.')
if self.isbacked:
raise ValueError('Not implemented.')
self._X = None
return
var_get = self.n_vars == 1 and self.n_obs == len(value)
obs_get = self.n_obs == 1 and self.n_vars == len(value)
if var_get or obs_get or self.shape == value.shape:
if self.isbacked:
if self.isview:
self.file['X'][self._oidx, self._vidx] = value
else:
self._set_backed('X', value)
else:
if self.isview:
# exit the view if sparseness changes (sparse -> dense or dense -> sparse)
if (
issparse(value) and not issparse(self._adata_ref._X)
or not issparse(value) and issparse(self._adata_ref._X)
):
self._init_as_actual(self.copy())
self._X = value
else:
self._adata_ref._X[self._oidx, self._vidx] = value
self._init_X_as_view()
else:
self._X = value
else:
raise ValueError('Data matrix has wrong shape {}, need to be {}'
.format(value.shape, self.shape))
@property
def layers(self) -> AnnDataLayers:
"""Dictionary-like object with values of the same dimensions as :attr:`X`.
Layers in AnnData have API similar to loompy :ref:`loomlayers`.
Return the layer named ``"unspliced"``::
adata.layers["unspliced"]
Create or replace the ``"spliced"`` layer::
adata.layers["spliced"] = ...
Assign the 10th column of layer ``"spliced"`` to the variable a::
a = adata.layers["spliced"][:, 10]
Delete the ``"spliced"`` layer::
del adata.layers["spliced"]
Return layers’ names::
adata.layers.keys()
.. warning::
If AnnData is a view, setting subsets of layers modifies the original data.
"""
return self._layers
@property
def raw(self) -> Raw:
"""Store raw version of :attr:`X` and :attr:`var` as ``.raw.X`` and ``.raw.var``.
The :attr:`raw` attribute is initialized with the current content of an object by setting::
adata.raw = adata
Its content can be deleted by setting it back to ``None``::
adata.raw = None
Upon slicing an AnnData object along the observations (row) axis,
:attr:`raw` is also sliced. Slicing an AnnData object along the variables
(columns) axis leaves :attr:`raw` unaffected. Note that you can call::
adata.raw[:, 'orig_variable_name'].X
to retrieve the data associated with a variable that might have been
filtered out or "compressed away" in :attr:`X`.
"""
return self._raw
@raw.setter
def raw(self, value: Optional['AnnData']):
if not (isinstance(value, AnnData) or value is None):
raise ValueError(
'Can only init raw attribute with an AnnData object or `None`.')
if value is None:
self._raw = None
else:
if self.isview:
self._init_as_actual(self.copy())
self._raw = Raw(value)
@property
def n_obs(self) -> int:
"""Number of observations."""
return self._n_obs
@property
def n_vars(self) -> int:
"""Number of variables/features."""
return self._n_vars
@property
def obs(self) -> pd.DataFrame:
"""One-dimensional annotation of observations (`pd.DataFrame`)."""
return self._obs
@obs.setter
def obs(self, value: pd.DataFrame):
if not isinstance(value, pd.DataFrame):
raise ValueError('Can only assign pd.DataFrame.')
if len(value) != self.n_obs:
raise ValueError('Length does not match.')
utils.warn_no_string_index(value.index)
if self.isview: self._adata_ref._obs.iloc[self._oidx] = value
else: self._obs = value
@property
def var(self) -> pd.DataFrame:
"""One-dimensional annotation of variables/ features (`pd.DataFrame`)."""
return self._var
@var.setter
def var(self, value: pd.DataFrame):
if not isinstance(value, pd.DataFrame):
raise ValueError('Can only assign pd.DataFrame.')
if len(value) != self.n_vars:
raise ValueError('Length does not match.')
utils.warn_no_string_index(value.index)
if self.isview: self._adata_ref._var.iloc[self._vidx] = value
else: self._var = value
@property
def uns(self) -> MutableMapping:
"""Unstructured annotation (ordered dictionary)."""
return self._uns
@uns.setter
def uns(self, value: MutableMapping):
if not isinstance(value, MutableMapping):
raise ValueError('Only mutable mapping types (e.g. dict) are allowed for `.uns`.')
if self.isview:
self._init_as_actual(self.copy())
self._uns = value
@property
def obsm(self) -> BoundRecArr:
"""Multi-dimensional annotation of observations (mutable structured :class:`~numpy.ndarray`).
Stores for each key, a two or higher-dimensional :class:`np.ndarray` of length
``n_obs``. Is sliced with ``data`` and ``obs`` but behaves otherwise like a
:class:`dict`.
"""
return self._obsm
@obsm.setter
def obsm(self, value: np.ndarray):
if not isinstance(value, np.ndarray):
raise ValueError('Can only assign `np.ndarray`.')
if len(value) != self.n_obs:
raise ValueError('Length does not match.')
if self.isview:
self._adata_ref._obsm[self._oidx] = value
else:
self._obsm = BoundRecArr(value, self, 'obsm')
@property
def varm(self) -> BoundRecArr:
"""Multi-dimensional annotation of variables/ features (mutable structured :class:`~numpy.ndarray`).
Stores for each key, a two or higher-dimensional :class:`~numpy.ndarray` of length
``n_vars``. Is sliced with ``data`` and ``var`` but behaves otherwise like a
:class:`dict`.
"""
return self._varm
@varm.setter
def varm(self, value: np.ndarray):
if not isinstance(value, np.ndarray):
raise ValueError('Can only assign `np.ndarray`.')
if len(value) != self.n_vars:
raise ValueError('Length does not match.')
if self.isview:
self._adata_ref._varm[self._vidx] = value
else:
self._varm = BoundRecArr(value, self, 'varm')
@property
def obs_names(self) -> pd.Index:
"""Names of observations (alias for ``.obs.index``)."""
return self.obs.index
@obs_names.setter
def obs_names(self, names: Sequence[str]):
utils.warn_no_string_index(names)
self._obs.index = names
if not self._obs.index.is_unique:
utils.warn_names_duplicates('obs')
@property
def var_names(self) -> pd.Index:
"""Names of variables (alias for ``.var.index``)."""
return self._var.index
@var_names.setter
def var_names(self, names: Sequence[str]):
utils.warn_no_string_index(names)
self._var.index = names
if not self._var.index.is_unique:
utils.warn_names_duplicates('var')
def obs_keys(self) -> List[str]:
"""List keys of observation annotation :attr:`obs`."""
return self._obs.keys().tolist()
def var_keys(self) -> List[str]:
"""List keys of variable annotation :attr:`var`."""
return self._var.keys().tolist()
def obsm_keys(self) -> List[str]:
"""List keys of observation annotation :attr:`obsm`."""
return list(self._obsm.keys())
def varm_keys(self) -> List[str]:
"""List keys of variable annotation :attr:`varm`."""
return list(self._varm.keys())
def uns_keys(self) -> List[str]:
"""List keys of unstructured annotation."""
return sorted(list(self._uns.keys()))
@property
def isbacked(self) -> bool:
"""``True`` if object is backed on disk, ``False`` otherwise."""
return self.filename is not None
@property
def isview(self) -> bool:
"""``True`` if object is view of another AnnData object, ``False`` otherwise."""
return self._isview
@property
def filename(self) -> Optional[PathLike]:
"""Change to backing mode by setting the filename of a ``.h5ad`` file.
- Setting the filename writes the stored data to disk.
- Setting the filename when the filename was previously another name
moves the backing file from the previous file to the new file. If you
want to copy the previous file, use ``copy(filename='new_filename')``.
"""
return self.file.filename
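# Usage sketch (illustrative path, not from the original docs):
#   adata.filename = 'my_results.h5ad'   # writes the data to disk and switches to backed mode
#   adata.filename = None                # loads the data back into memory and detaches the file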
@filename.setter
def filename(self, filename: Optional[PathLike]):
# convert early for later comparison
filename = None if filename is None else Path(filename)
# change from backing-mode back to full loading into memory
if filename is None:
if self.filename is not None:
self.file._to_memory_mode()
else:
# both filename and self.filename are None
# do nothing
return
else:
if self.filename is not None:
if self.filename != filename:
# write the content of self to the old file
# and close the file
self.write()
os.rename(self.filename, filename)
else:
# do nothing
return
else:
# change from memory to backing-mode
# write the content of self to disk
self.write(filename, force_dense=True)
# open new file for accessing
self.file.open(filename, 'r+')
# as the data is stored on disk, we can safely set self._X to None
self._X = None
def _set_backed(self, attr, value):
if (not isinstance(self.file[attr], h5py.SparseDataset)
and not issparse(value)):
self.file[attr] = value
else:
del self.file[attr]
self.file._file.create_dataset(attr, data=value)
def _normalize_indices(self, index: Optional[Index]):
# deal with tuples of length 1
if isinstance(index, tuple) and len(index) == 1:
index = index[0]
# deal with pd.Series
if isinstance(index, pd.Series):
index = index.values
if isinstance(index, tuple):
if len(index) > 2:
raise ValueError(
'AnnData can only be sliced in rows and columns.')
# deal with pd.Series
if isinstance(index[1], pd.Series):
index = index[0], index[1].values
if isinstance(index[0], pd.Series):
index = index[0].values, index[1]
no_slice = not any(isinstance(i, slice) for i in index)
both_scalars = all(isinstance(i, (int, str, type(None))) for i in index)
if no_slice and not both_scalars:
raise NotImplementedError(
'Slicing with two indices at the same time is not yet implemented. '
'As a workaround, do row and column slicing successively.')
# Speed up and error prevention for boolean indices (Don’t convert to integer indices)
# Needs to be refactored once we support a tuple of two arbitrary index types
if any(isinstance(i, np.ndarray) and i.dtype == bool for i in index):
return index
obs, var = super()._unpack_index(index)
obs = _normalize_index(obs, self.obs_names)
var = _normalize_index(var, self.var_names)
return obs, var
# TODO: this is not quite complete...
def __delitem__(self, index: Index):
obs, var = self._normalize_indices(index)
# TODO: does this really work?
if not self.isbacked:
del self._X[obs, var]
else:
X = self.file['X']
del X[obs, var]
self._set_backed('X', X)
if var == slice(None):
del self._obs.iloc[obs, :]
if obs == slice(None):
del self._var.iloc[var, :]
def __getitem__(self, index: Index) -> 'AnnData':
"""Returns a sliced view of the object."""
return self._getitem_view(index)
def _getitem_view(self, index: Index) -> 'AnnData':
oidx, vidx = self._normalize_indices(index)
return AnnData(self, oidx=oidx, vidx=vidx, asview=True)
def _remove_unused_categories(self, df_full, df_sub, uns):
from pandas.api.types import is_categorical
for k in df_full:
if is_categorical(df_full[k]):
all_categories = df_full[k].cat.categories
df_sub[k].cat.remove_unused_categories(inplace=True)
# also correct the colors...
if k + '_colors' in uns:
# this is a strange hack...
if np.array(uns[k + '_colors']).ndim == 0:
uns[k + '_colors'] = np.array(uns[k + '_colors'])[None]
uns[k + '_colors'] = np.array(uns[k + '_colors'])[
np.where(np.in1d(
all_categories, df_sub[k].cat.categories))[0]]
def rename_categories(self, key: str, categories: Sequence[Any]):
"""Rename categories of annotation ``key`` in
:attr:`obs`, :attr:`var` and :attr:`uns`.
Only supports passing a list/array-like ``categories`` argument.
Besides calling ``self.obs[key].cat.categories = categories`` -
similar for :attr:`var` - this also renames categories in unstructured
annotation that uses the categorical annotation ``key``.
Parameters
----------
key
Key for observations or variables annotation.
categories
New categories, the same number as the old categories.
"""
if isinstance(categories, Mapping):
raise ValueError('Only list-like `categories` is supported.')
if key in self.obs:
old_categories = self.obs[key].cat.categories.tolist()
self.obs[key].cat.rename_categories(categories, inplace=True)
elif key in self.var:
old_categories = self.var[key].cat.categories.tolist()
self.var[key].cat.rename_categories(categories, inplace=True)
else:
raise ValueError('{} is neither in `.obs` nor in `.var`.'
.format(key))
# this is not a good solution
# but depends on the scanpy conventions for storing the categorical key
# as `groupby` in the `params` slot
for k1, v1 in self.uns.items():
if isinstance(v1, Mapping):
if 'params' in v1 and 'groupby' in v1['params']:
if v1['params']['groupby'] == key:
for k2, v2 in v1.items():
# picks out the recarrays that are named according to the old
# categories
if isinstance(v2, np.ndarray) and v2.dtype.names is not None:
if list(v2.dtype.names) == old_categories:
self.uns[k1][k2].dtype.names = categories
else:
logger.warning(
'Omitting {}/{} as old categories do not match.'
.format(k1, k2))
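# Usage sketch (the key 'cell_type' is a hypothetical categorical column in .obs):
#   adata.rename_categories('cell_type', ['T cell', 'B cell', 'NK cell'])
# the new list must contain exactly as many entries as there are old categories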
def strings_to_categoricals(self, df: Optional[pd.DataFrame] = None):
"""Transform string annotations to categoricals.
Only affects string annotations that lead to fewer categories than the
total number of observations.
Params
------
df
If ``df`` is ``None``, modifies both :attr:`obs` and :attr:`.var`,
otherwise modifies ``df`` inplace.
Notes
-----
Turns the view of an :class:`~anndata.AnnData` into an actual
:class:`~anndata.AnnData`.
"""
dont_modify = False # only necessary for backed views
if df is None:
dfs = [self.obs, self.var]
if self.isview:
if not self.isbacked:
self._init_as_actual(self.copy())
else:
dont_modify = True
else:
dfs = [df]
for df in dfs:
string_cols = [
key for key in df.columns
if is_string_dtype(df[key])
and not is_categorical(df[key])
]
for key in string_cols:
# make sure we only have strings (could be that there are
# np.nans (float), -666, '-666', for instance)
c = df[key].astype('U')
# make a categorical
c = pd.Categorical(c, categories=natsorted(np.unique(c)))
if len(c.categories) < len(c):
if dont_modify:
raise RuntimeError(
'Please call `.strings_to_categoricals()` on full AnnData, not on this view. '
'You might encounter this error message while copying or writing to disk.'
)
df[key] = c
logger.info('... storing {!r} as categorical'.format(key))
_sanitize = strings_to_categoricals # backwards compat
def _slice_uns_sparse_matrices_inplace(self, uns, oidx):
        # slice sparse matrices of n_obs × n_obs in self.uns
if not (isinstance(oidx, slice) and
oidx.start is None and oidx.step is None and oidx.stop is None):
for k, v in uns.items():
# treat nested dicts
if isinstance(v, Mapping):
self._slice_uns_sparse_matrices_inplace(v, oidx)
if isinstance(v, sparse.spmatrix) and v.shape == (
self.n_obs, self.n_obs):
uns[k] = v.tocsc()[:, oidx].tocsr()[oidx, :]
def _inplace_subset_var(self, index):
"""Inplace subsetting along variables dimension.
Same as ``adata = adata[:, index]``, but inplace.
"""
adata_subset = self[:, index].copy()
self._init_as_actual(adata_subset, dtype=self._X.dtype)
def _inplace_subset_obs(self, index):
"""Inplace subsetting along variables dimension.
Same as ``adata = adata[index, :]``, but inplace.
"""
adata_subset = self[index].copy()
self._init_as_actual(adata_subset, dtype=self._X.dtype)
def _get_obs_array(self, k, use_raw=False, layer='X'):
"""Get an array from the layer (default layer='X') along the observation dimension by first looking up
obs.keys and then var.index."""
in_raw_var_names = k in self.raw.var_names if self.raw is not None else False
if use_raw and self.raw is None:
raise ValueError('.raw doesn\'t exist')
if k in self.obs.keys():
x = self._obs[k]
elif in_raw_var_names and use_raw and layer == 'X':
x = self.raw[:, k].X
elif k in self.var_names and not use_raw and (layer == 'X' or layer in self.layers.keys()):
x = self[:, k].X if layer=='X' else self[:, k].layers[layer]
elif use_raw and layer != 'X':
raise ValueError('No layers in .raw')
elif layer != 'X' and layer not in self.layers.keys():
raise ValueError('Did not find {} in layers.keys.'
.format(layer))
else:
raise ValueError('Did not find {} in obs.keys or var_names.'
.format(k))
return x
def _get_var_array(self, k, use_raw=False, layer='X'):
"""Get an array from the layer (default layer='X') along the variables dimension by first looking up
``var.keys`` and then ``obs.index``."""
in_raw_obs_names = k in self.raw.obs_names if self.raw is not None else False
if use_raw and self.raw is None:
raise ValueError('.raw doesn\'t exist')
if k in self.var.keys():
x = self._var[k]
elif in_raw_obs_names and use_raw and layer == 'X':
x = self.raw[k].X
elif k in self.obs_names and not use_raw and (layer == 'X' or layer in self.layers.keys()):
x = self[k].X if layer=='X' else self[k].layers[layer]
elif use_raw and layer != 'X':
raise ValueError('No layers in .raw')
elif layer != 'X' and layer not in self.layers.keys():
raise ValueError('Did not find {} in layers.keys.'
.format(layer))
else:
raise ValueError('Did not find {} in var.keys or obs_names.'
.format(k))
return x
def __setitem__(self, index: Index, val: Union[int, float, np.ndarray, sparse.spmatrix]):
if self.isview:
raise ValueError('Object is view and cannot be accessed with `[]`.')
obs, var = self._normalize_indices(index)
if not self.isbacked:
self._X[obs, var] = val
else:
X = self.file['X']
X[obs, var] = val
self._set_backed('X', X)
def __len__(self) -> int:
return self.shape[0]
def transpose(self) -> 'AnnData':
"""Transpose whole object.
Data matrix is transposed, observations and variables are interchanged.
"""
if self._X is not None and self._X.dtype.name != 'float32':
logger.warning(
'Up to anndata 0.6.12, `.transpose()` cast a '
'non-\'float32\' `.X` to \'float32\'. '
'Now, the dtype \'{}\' is maintained. '
.format(self._X.dtype.name))
if not self.isbacked: X = self._X
else: X = self.file['X']
if self.isview:
raise ValueError(
'You\'re trying to transpose a view of an `AnnData`, which is currently not implemented. '
'Call `.copy()` before transposing.')
layers = {k:(v.T.tocsr() if sparse.isspmatrix_csr(v) else v.T) for (k, v) in self.layers.items(copy=False)}
if sparse.isspmatrix_csr(X):
return AnnData(X.T.tocsr(), self._var, self._obs, self._uns,
self._varm.flipped(), self._obsm.flipped(),
filename=self.filename, layers=layers, dtype=self.X.dtype.name)
return AnnData(X.T, self._var, self._obs, self._uns,
self._varm.flipped(), self._obsm.flipped(),
filename=self.filename, layers=layers, dtype=self.X.dtype.name)
T = property(transpose)
def to_df(self) -> pd.DataFrame:
"""Generate shallow :class:`~pandas.DataFrame`.
The data matrix :attr:`X` is returned as
:class:`~pandas.DataFrame`, where :attr:`obs_names` initializes the
index, and :attr:`var_names` the columns.
* No annotations are maintained in the returned object.
* The data matrix is densified in case it is sparse.
"""
if issparse(self._X):
X = self._X.toarray()
else:
X = self._X
return pd.DataFrame(X, index=self.obs_names, columns=self.var_names)
def copy(self, filename: Optional[PathLike] = None) -> 'AnnData':
"""Full copy, optionally on disk."""
if not self.isbacked:
if self._X is not None and self._X.dtype.name != 'float32':
logger.warning(
'Up to anndata 0.6.12, `.copy()` cast a '
'non-\'float32\' `.X` to \'float32\'. '
'Now, the dtype \'{}\' is maintained. '
.format(self._X.dtype.name))
return AnnData(self._X.copy() if self._X is not None else None,
self._obs.copy(),
self._var.copy(),
# deepcopy on DictView does not work and is unnecessary
# as uns was copied already before
self._uns.copy() if isinstance(self._uns, DictView) else deepcopy(self._uns),
self._obsm.copy(), self._varm.copy(),
raw=None if self._raw is None else self._raw.copy(),
layers=self.layers.as_dict(),
dtype=self._X.dtype.name if self._X is not None else 'float32')
else:
if filename is None:
raise ValueError(
'To copy an AnnData object in backed mode, '
'pass a filename: `.copy(filename=\'myfilename.h5ad\')`.')
if self.isview:
self.write(filename)
else:
from shutil import copyfile
copyfile(self.filename, filename)
return AnnData(filename=filename)
def concatenate(
self, *adatas: 'AnnData',
join: str = 'inner',
batch_key: str = 'batch',
batch_categories: Sequence[Any] = None,
index_unique: Optional[str] = '-'
) -> 'AnnData':
"""Concatenate along the observations axis.
The :attr:`uns`, :attr:`varm` and :attr:`obsm` attributes are ignored.
Currently, this works only in ``'memory'`` mode.
Parameters
----------
adatas
AnnData matrices to concatenate with. Each matrix is referred to as
a “batch”.
join
Use intersection (``'inner'``) or union (``'outer'``) of variables.
batch_key
Add the batch annotation to :attr:`obs` using this key.
batch_categories
Use these as categories for the batch annotation. By default, use increasing numbers.
index_unique
Make the index unique by joining the existing index names with the
batch category, using ``index_unique='-'``, for instance. Provide
``None`` to keep existing indices.
Returns
-------
:class:`~anndata.AnnData`
The concatenated :class:`~anndata.AnnData`, where ``adata.obs[batch_key]``
stores a categorical variable labeling the batch.
Notes
-----
.. warning::
If you use ``join='outer'`` this fills 0s for sparse data when
variables are absent in a batch. Use this with care. Dense data is
filled with ``NaN``. See the examples.
Examples
--------
Joining on intersection of variables.
>>> adata1 = AnnData(np.array([[1, 2, 3], [4, 5, 6]]),
>>> {'obs_names': ['s1', 's2'],
>>> 'anno1': ['c1', 'c2']},
>>> {'var_names': ['a', 'b', 'c'],
>>> 'annoA': [0, 1, 2]})
>>> adata2 = AnnData(np.array([[1, 2, 3], [4, 5, 6]]),
>>> {'obs_names': ['s3', 's4'],
>>> 'anno1': ['c3', 'c4']},
>>> {'var_names': ['d', 'c', 'b'],
>>> 'annoA': [0, 1, 2]})
>>> adata3 = AnnData(np.array([[1, 2, 3], [4, 5, 6]]),
>>> {'obs_names': ['s1', 's2'],
>>> 'anno2': ['d3', 'd4']},
>>> {'var_names': ['d', 'c', 'b'],
>>> 'annoA': [0, 2, 3],
>>> 'annoB': [0, 1, 2]})
>>>
>>> adata = adata1.concatenate(adata2, adata3)
>>> adata
AnnData object with n_obs × n_vars = 6 × 2
obs_keys = ['anno1', 'anno2', 'batch']
var_keys = ['annoA-0', 'annoA-1', 'annoB-2', 'annoA-2']
>>> adata.X
array([[2., 3.],
[5., 6.],
[3., 2.],
[6., 5.],
[3., 2.],
[6., 5.]], dtype=float32)
>>> adata.obs
anno1 anno2 batch
s1-0 c1 NaN 0
s2-0 c2 NaN 0
s3-1 c3 NaN 1
s4-1 c4 NaN 1
s1-2 NaN d3 2
s2-2 NaN d4 2
>>> adata.var.T
b c
annoA-0 1 2
annoA-1 2 1
annoB-2 2 1
annoA-2 3 2
Joining on the union of variables.
>>> adata = adata1.concatenate(adata2, adata3, join='outer')
>>> adata
AnnData object with n_obs × n_vars = 6 × 4
obs_keys = ['anno1', 'anno2', 'batch']
var_keys = ['annoA-0', 'annoA-1', 'annoB-2', 'annoA-2']
>>> adata.var.T
index a b c d
annoA-0 0.0 1.0 2.0 NaN
annoA-1 NaN 2.0 1.0 0.0
annoB-2 NaN 2.0 1.0 0.0
annoA-2 NaN 3.0 2.0 0.0
>>> adata.var_names
Index(['a', 'b', 'c', 'd'], dtype='object')
>>> adata.X
array([[ 1., 2., 3., nan],
[ 4., 5., 6., nan],
[nan, 3., 2., 1.],
[nan, 6., 5., 4.],
[nan, 3., 2., 1.],
[nan, 6., 5., 4.]], dtype=float32)
>>> adata.X.sum(axis=0)
array([nan, 25., 23., nan], dtype=float32)
>>> import pandas as pd
>>> Xdf = pd.DataFrame(adata.X, columns=adata.var_names)
index a b c d
0 1.0 2.0 3.0 NaN
1 4.0 5.0 6.0 NaN
2 NaN 3.0 2.0 1.0
3 NaN 6.0 5.0 4.0
4 NaN 3.0 2.0 1.0
5 NaN 6.0 5.0 4.0
>>> Xdf.sum()
index
a 5.0
b 25.0
c 23.0
d 10.0
dtype: float32
>>> from numpy import ma
>>> adata.X = ma.masked_invalid(adata.X)
>>> adata.X
masked_array(
data=[[1.0, 2.0, 3.0, --],
[4.0, 5.0, 6.0, --],
[--, 3.0, 2.0, 1.0],
[--, 6.0, 5.0, 4.0],
[--, 3.0, 2.0, 1.0],
[--, 6.0, 5.0, 4.0]],
mask=[[False, False, False, True],
[False, False, False, True],
[ True, False, False, False],
[ True, False, False, False],
[ True, False, False, False],
[ True, False, False, False]],
fill_value=1e+20,
dtype=float32)
>>> adata.X.sum(axis=0).data
array([ 5., 25., 23., 10.], dtype=float32)
The masked array is not saved but has to be reinstantiated after saving.
>>> adata.write('./test.h5ad')
>>> from anndata import read_h5ad
>>> adata = read_h5ad('./test.h5ad')
>>> adata.X
array([[ 1., 2., 3., nan],
[ 4., 5., 6., nan],
[nan, 3., 2., 1.],
[nan, 6., 5., 4.],
[nan, 3., 2., 1.],
[nan, 6., 5., 4.]], dtype=float32)
For sparse data, everything behaves similarly, except that for ``join='outer'``, zeros are added.
>>> from scipy.sparse import csr_matrix
>>> adata1 = AnnData(csr_matrix([[0, 2, 3], [0, 5, 6]]),
>>> {'obs_names': ['s1', 's2'],
>>> 'anno1': ['c1', 'c2']},
>>> {'var_names': ['a', 'b', 'c']})
>>> adata2 = AnnData(csr_matrix([[0, 2, 3], [0, 5, 6]]),
>>> {'obs_names': ['s3', 's4'],
>>> 'anno1': ['c3', 'c4']},
>>> {'var_names': ['d', 'c', 'b']})
>>> adata3 = AnnData(csr_matrix([[1, 2, 0], [0, 5, 6]]),
>>> {'obs_names': ['s5', 's6'],
>>> 'anno2': ['d3', 'd4']},
>>> {'var_names': ['d', 'c', 'b']})
>>>
>>> adata = adata1.concatenate(adata2, adata3, join='outer')
>>> adata.var_names
Index(['a', 'b', 'c', 'd'], dtype='object')
>>> adata.X.toarray()
array([[0., 2., 3., 0.],
[0., 5., 6., 0.],
[0., 3., 2., 0.],
[0., 6., 5., 0.],
[0., 0., 2., 1.],
[0., 6., 5., 0.]], dtype=float32)
"""
if self.isbacked:
raise ValueError(
'Currently, concatenate does only work in \'memory\' mode.')
if len(adatas) == 0:
return self
elif len(adatas) == 1 and not isinstance(adatas[0], AnnData):
adatas = adatas[0] # backwards compatibility
all_adatas = (self,) + tuple(adatas)
# for controlled behavior, make all variable names unique
printed_info = False
for i, ad in enumerate(all_adatas):
if not ad.var_names.is_unique:
ad.var_names = utils.make_index_unique(ad.var_names)
if not printed_info:
logger.info(
'Making variable names unique for controlled concatenation.')
printed_info = True
# define variable names of joint AnnData
mergers = dict(inner=set.intersection, outer=set.union)
var_names_reduce = reduce(mergers[join], (set(ad.var_names) for ad in all_adatas))
# restore order of initial var_names, append non-sortable names at the end
# see how this was done in the repo at commit state
# 40a24f
var_names = []
for v in all_adatas[0].var_names:
if v in var_names_reduce:
var_names.append(v)
var_names_reduce.remove(v) # update the set
var_names = pd.Index(var_names + list(var_names_reduce))
if batch_categories is None:
categories = [str(i) for i, _ in enumerate(all_adatas)]
elif len(batch_categories) == len(all_adatas):
categories = batch_categories
else:
raise ValueError('Provide as many `batch_categories` as `adatas`.')
out_shape = (sum(a.n_obs for a in all_adatas), len(var_names))
any_sparse = any(issparse(a.X) for a in all_adatas)
if join == 'outer':
if any_sparse: # not sure whether the lil_matrix is really the best option
X = sparse.lil_matrix(out_shape, dtype=self.X.dtype)
else:
X = np.empty(out_shape, dtype=self.X.dtype)
X[:] = np.nan
else:
Xs = []
# create layers dict that contains layers shared among all AnnDatas
layers = OrderedDict()
shared_layers = [key for key in all_adatas[0].layers.keys()
if all([key in ad.layers.keys() for ad in all_adatas])]
for key in shared_layers:
layers[key] = []
        # check whether an 'outer' join is requested while layers are non-empty
if join == 'outer' and len(shared_layers) > 0:
logger.info(
'layers concatenation is not yet available for \'outer\' intersection and will be ignored.')
# check whether layers are not consistently set in all AnnData objects.
n_layers = np.array([len(ad.layers.keys()) for ad in all_adatas])
if join == 'inner' and not all(len(shared_layers) == n_layers):
logger.info(
'layers are inconsistent - only layers that are shared among all AnnData objects are included.')
var = pd.DataFrame(index=var_names)
obs_i = 0 # start of next adata’s observations in X
out_obss = []
for i, ad in enumerate(all_adatas):
if join == 'outer':
# only those names that are actually present in the current AnnData
vars_intersect = [v for v in var_names if v in ad.var_names]
else:
vars_intersect = var_names
# X
if join == 'outer':
# this is pretty slow, I guess sparse matrices shouldn't be
# constructed like that
X[obs_i:obs_i+ad.n_obs,
var_names.isin(vars_intersect)] = ad[:, vars_intersect].X
else:
Xs.append(ad[:, vars_intersect].X)
obs_i += ad.n_obs
# layers
if join == 'inner':
for key in shared_layers:
layers[key].append(ad[:, vars_intersect].layers[key])
# obs
obs = ad.obs.copy()
obs[batch_key] = pd.Categorical(ad.n_obs * [categories[i]], categories)
if (is_string_dtype(all_adatas[0].obs.index) and not
is_string_dtype(ad.obs.index)):
obs.index = obs.index.astype(str)
if index_unique is not None:
if not is_string_dtype(ad.obs.index):
obs.index = obs.index.astype(str)
obs.index = obs.index.values + index_unique + categories[i]
out_obss.append(obs)
# var
for c in ad.var.columns:
new_c = c + (index_unique if index_unique is not None else '-') + categories[i]
var.loc[vars_intersect, new_c] = ad.var.loc[vars_intersect, c]
if join == 'inner':
if any_sparse:
from scipy.sparse import vstack
X = vstack(Xs)
else:
X = np.concatenate(Xs)
for key in shared_layers:
if any(issparse(a.layers[key]) for a in all_adatas):
layers[key] = vstack(layers[key])
else:
layers[key] = np.concatenate(layers[key])
obs = pd.concat(out_obss, sort=True)
if any_sparse:
sparse_format = all_adatas[0].X.getformat()
X = X.asformat(sparse_format)
new_adata = AnnData(X, obs, var, layers=layers) if join == 'inner' else AnnData(X, obs, var)
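        # note: duplicate observation names in the result are presumably already
        # warned about when the AnnData above is constructed (see
        # `_check_uniqueness` below); the message here only adds an extra hint.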
if not obs.index.is_unique:
logger.info(
'Or pass `index_unique!=None` to `.concatenate`.')
return new_adata
def var_names_make_unique(self, join: str = '-'):
self.var.index = utils.make_index_unique(self.var.index, join)
var_names_make_unique.__doc__ = utils.make_index_unique.__doc__
def obs_names_make_unique(self, join: str = '-'):
self.obs.index = utils.make_index_unique(self.obs.index, join)
obs_names_make_unique.__doc__ = utils.make_index_unique.__doc__
def _check_uniqueness(self):
if not self.obs.index.is_unique:
utils.warn_names_duplicates('obs')
if not self.var.index.is_unique:
utils.warn_names_duplicates('var')
def __contains__(self, key: Any):
raise AttributeError('AnnData has no attribute __contains__, '
'don\'t check `in adata`.')
def _check_dimensions(self, key=None):
if key is None:
key = {'obs', 'var', 'obsm', 'varm'}
else:
key = {key}
if 'obs' in key and len(self._obs) != self._n_obs:
raise ValueError('Observations annot. `obs` must have number of '
'rows of `X` ({}), but has {} rows.'
.format(self._n_obs, self._obs.shape[0]))
if 'var' in key and len(self._var) != self._n_vars:
raise ValueError('Variables annot. `var` must have number of '
'columns of `X` ({}), but has {} rows.'
.format(self._n_vars, self._var.shape[0]))
if 'obsm' in key and len(self._obsm) != self._n_obs:
raise ValueError('Observations annot. `obsm` must have number of '
'rows of `X` ({}), but has {} rows.'
.format(self._n_obs, len(self._obsm)))
if 'varm' in key and len(self._varm) != self._n_vars:
raise ValueError('Variables annot. `varm` must have number of '
'columns of `X` ({}), but has {} rows.'
.format(self._n_vars, len(self._varm)))
def write_h5ad(
self,
filename: Optional[PathLike] = None,
compression: Optional[str] = None,
compression_opts: Union[int, Any] = None,
force_dense: Optional[bool] = None
):
"""Write ``.h5ad``-formatted hdf5 file.
.. note::
Setting compression to ``'gzip'`` can save disk space but
will slow down writing and subsequent reading. Prior to
v0.6.16, this was the default for parameter
``compression``.
Generally, if you have sparse data that are stored as a dense
matrix, you can dramatically improve performance and reduce
disk space by converting to a :class:`~scipy.sparse.csr_matrix`::
from scipy.sparse import csr_matrix
adata.X = csr_matrix(adata.X)
Parameters
----------
filename
Filename of data file. Defaults to backing file.
compression : ``None``, {``'gzip'``, ``'lzf'``} (default: ``None``)
See the h5py :ref:`dataset_compression`.
compression_opts
See the h5py :ref:`dataset_compression`.
force_dense
Write sparse data as a dense matrix. Defaults to ``True`` if object is
backed, otherwise to ``False``.
"""
from .readwrite.write import _write_h5ad
if filename is None and not self.isbacked:
raise ValueError('Provide a filename!')
if filename is None:
filename = self.filename
if force_dense is None:
force_dense = self.isbacked
_write_h5ad(filename, self, compression=compression,
compression_opts=compression_opts, force_dense=force_dense)
if self.isbacked:
self.file.close()
write = write_h5ad # a shortcut and backwards compat
def write_csvs(self, dirname: PathLike, skip_data: bool = True, sep: str = ','):
"""Write annotation to ``.csv`` files.
It is not possible to recover the full :class:`~anndata.AnnData` from the
output of this function. Use :meth:`~anndata.AnnData.write` for this.
Parameters
----------
dirname
Name of directory to which to export.
skip_data
Skip the data matrix :attr:`X`.
sep
Separator for the data.
"""
from .readwrite.write import write_csvs
write_csvs(dirname, self, skip_data=skip_data, sep=sep)
def write_loom(self, filename: PathLike, write_obsm_varm: bool = False):
"""Write ``.loom``-formatted hdf5 file.
Parameters
----------
filename
The filename.
"""
from .readwrite.write import write_loom
write_loom(filename, self, write_obsm_varm = write_obsm_varm)
def write_zarr(
self,
store: Union[MutableMapping, PathLike],
chunks: Union[bool, int, Tuple[int, ...]],
):
"""Write a hierarchical Zarr array store.
Parameters
----------
store
The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class.
chunks
Chunk shape.
"""
from .readwrite.write import write_zarr
write_zarr(store, self, chunks=chunks)
def chunked_X(self, chunk_size: Optional[int] = None):
"""Return an iterator over the rows of the data matrix :attr:`X`.
Parameters
----------
chunk_size
Row size of a single chunk.
"""
if chunk_size is None:
# Should be some adaptive code
chunk_size = 6000
start = 0
n = self.n_obs
for _ in range(int(n // chunk_size)):
end = start + chunk_size
yield (self.X[start:end], start, end)
start = end
if start < n:
yield (self.X[start:n], start, n)
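    # Illustrative (commented) usage sketch, assuming `adata` is an AnnData:
    #   for chunk, start, end in adata.chunked_X(5000):
    #       ...  # each `chunk` holds rows [start, end) of `adata.X`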
def chunk_X(
self,
select: Union[int, List[int], Tuple[int, ...], np.ndarray] = 1000,
replace: bool = True,
):
"""Return a chunk of the data matrix :attr:`X` with random or specified indices.
Parameters
----------
select
If select is an integer, a random chunk of row size = select will be returned.
If select is a list, tuple or numpy array of integers, then a chunk
with these indices will be returned.
replace
If select is an integer then ``replace=True`` specifies random sampling of indices
with replacement, ``replace=False`` - without replacement.
"""
if isinstance(select, int):
select = select if select < self.n_obs else self.n_obs
choice = np.random.choice(self.n_obs, select, replace)
elif isinstance(select, (np.ndarray, list, tuple)):
choice = np.asarray(select)
else:
raise ValueError('select should be int or array')
reverse = None
if self.isbacked:
# h5py can only slice with a sorted list of unique index values
# so random batch with indices [2, 2, 5, 3, 8, 10, 8] will fail
# this fixes the problem
indices, reverse = np.unique(choice, return_inverse=True)
selection = self.X[indices.tolist()]
else:
selection = self.X[choice]
selection = selection.toarray() if issparse(selection) else selection
return selection if reverse is None else selection[reverse]
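    # Illustrative (commented) usage sketch, assuming `adata` is an AnnData:
    #   adata.chunk_X(1000)            # 1000 random rows (sampled with replacement)
    #   adata.chunk_X([0, 2, 2, 5])    # rows at the given (possibly repeated) indices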
@staticmethod
def _args_from_dict(ddata: Mapping[str, Any]):
"""Allows to construct an instance of AnnData from a dictionary.
Acts as interface for the communication with the hdf5 file.
In particular, from a dict that has been written using
``AnnData._to_dict_fixed_width_arrays``.
"""
d_true_keys = {}
# backwards compat
uns_is_not_key = False
valid_keys = []
for keys in AnnData._H5_ALIASES.values():
valid_keys += keys
valid_keys += ['raw.X', 'raw.var', 'raw.varm', 'raw.cat']
for key in ddata.keys():
            # if there is a key other than the predefined ones,
            # we are reading the old format
if key not in valid_keys:
uns_is_not_key = True
for true_key, keys in AnnData._H5_ALIASES.items():
for key in keys:
if key in ddata:
d_true_keys[true_key] = ddata[key]
if uns_is_not_key: del ddata[key]
break
else:
d_true_keys[true_key] = None
# transform recarray to dataframe
for true_key, keys in AnnData._H5_ALIASES_NAMES.items():
if d_true_keys[true_key] is not None:
for key in keys:
if key in d_true_keys[true_key].dtype.names:
d_true_keys[true_key] = pd.DataFrame.from_records(
d_true_keys[true_key], index=key)
break
d_true_keys[true_key].index = d_true_keys[true_key].index.astype('U')
# transform to unicode string
# TODO: this is quite a hack
for c in d_true_keys[true_key].columns:
if is_string_dtype(d_true_keys[true_key][c]):
d_true_keys[true_key][c] = pd.Index(
d_true_keys[true_key][c]).astype('U').values
# these are the category fields
k_to_delete = []
items = (
ddata.items() if uns_is_not_key
else ddata['uns'].items() if 'uns' in ddata else []
)
for k, v in items:
if k.endswith('_categories'):
k_stripped = k.replace('_categories', '')
if isinstance(v, (str, int)): # fix categories with a single category
v = [v]
for ann in ['obs', 'var']:
if k_stripped in d_true_keys[ann]:
d_true_keys[ann][k_stripped] = pd.Categorical.from_codes(
codes=d_true_keys[ann][k_stripped].values,
categories=v,
)
k_to_delete.append(k)
for k in k_to_delete:
if uns_is_not_key:
del ddata[k]
else:
del ddata['uns'][k]
# assign the variables
X = d_true_keys['X']
obs = d_true_keys['obs']
obsm = d_true_keys['obsm']
var = d_true_keys['var']
varm = d_true_keys['varm']
layers = d_true_keys['layers']
raw = None
if 'raw.X' in ddata:
raw = {}
raw['X'] = ddata['raw.X']
del ddata['raw.X']
# get the dataframe
raw['var'] = pd.DataFrame.from_records(
ddata['raw.var'], index='index')
del ddata['raw.var']
raw['var'].index = raw['var'].index.astype('U')
# transform to unicode string
for c in raw['var'].columns:
if is_string_dtype(raw['var'][c]):
raw['var'][c] =
|
pd.Index(raw['var'][c])
|
pandas.Index
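# A minimal standalone sketch of the `pandas.Index` completion above: casting a
# string column to unicode values via an Index, as done for `raw['var'][c]`.
# The toy frame below is hypothetical.
import pandas as pd

toy = pd.DataFrame({'gene': ['A1BG', 'TP53', 'BRCA1']})
toy['gene'] = pd.Index(toy['gene']).astype('U').values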
|
from __future__ import print_function
import re
import codecs
import operator
import csv
import glob
import re
from os import listdir
from os.path import isfile, join
import pandas as pd
import bibtexparser
META_STARTS = "META_STARTS"
META_ENDS = "META_ENDS"
CONSTANT_ARTICLE = "@Article"
BIBTEX_MAIN_FILE_NAME = "KDD99ReviewArticles.bib"
bibtex_folder = "../../../latexAndBibliography/"
csv_save_folder = "../latex/excel/"
def get_csv_save_folder():
return csv_save_folder
def read_file_return_content(file_name):
f = codecs.open(file_name, "r", "utf-8")
c = f.read()
return c
def get_bibtex_full_filename_in_bibtex_directory(base_file_name):
file_name = join(bibtex_folder, base_file_name)
return file_name
def read_bibtex_file_bibtex_directory_content(base_file_name):
bibtex_file_name = get_bibtex_full_filename_in_bibtex_directory(base_file_name)
bibtex_file_content = read_file_return_content(bibtex_file_name)
return bibtex_file_content
def find_list_of_bib_files():
files_to_find = bibtex_folder + "/*.bib"
onlyfiles = [f for f in listdir(bibtex_folder) if isfile(join(bibtex_folder, f))]
return [f for f in onlyfiles if f.endswith(".bib")]
def find_bibtex_entries_from_content(bib_file_content):
article_list = bib_file_content.split(CONSTANT_ARTICLE)
articles_containing_meta = []
for article in article_list:
if META_STARTS in article:
articles_containing_meta.append(CONSTANT_ARTICLE + article)
return articles_containing_meta
def combine_bib_files_in_latexAndBibliography_to_one_list():
all_articles = []
total_article_count = 0
list_of_bib_files = sorted(find_list_of_bib_files())
journal_count = 0
for bib_filename in list_of_bib_files:
bib_file_content = read_bibtex_file_bibtex_directory_content(bib_filename)
if META_STARTS in bib_file_content:
journal_count = journal_count + 1
list_of_articles_from_bib_file = find_bibtex_entries_from_content(bib_file_content)
all_articles = all_articles + list_of_articles_from_bib_file
journal_article_count = len(list_of_articles_from_bib_file)
print(bib_filename)
print("journal_article_count",journal_article_count)
for article in list_of_articles_from_bib_file:
first_line = article.split("\n")[0]
print("\t\t",first_line)
total_article_count = total_article_count + journal_article_count
print("total_article_count",total_article_count)
print("journal_count",journal_count)
return all_articles
def create_kdd99_review_bib_file():
list_of_all_articles = combine_bib_files_in_latexAndBibliography_to_one_list()
    fileToWrite = open(BIBTEX_MAIN_FILE_NAME, 'w', encoding='utf-8')
for bibtex_content in list_of_all_articles:
lines = bibtex_content.split("\n")
for line in lines:
lineToWrite = not (("file" in line) or ("owner" in line) or ("@Comment" in line) or ("__markedentry" in line) or ("timestamp" in line) )
            if lineToWrite:
                # write the text directly; encoding is handled by the file object
                fileToWrite.write("%s\n" % line)
fileToWrite.close()
def get_list_of_dictionary_created_from_bibtex_file():
with open(BIBTEX_MAIN_FILE_NAME,"r" , encoding="utf-8") as bibtex_file:
list_of_article_dictionaries = bibtexparser.load(bibtex_file)
for article_entry in list_of_article_dictionaries.entries:
str_review = article_entry["comment"]
start = str_review.index("META_STARTS")
end = str_review.index("META_ENDS")
meta_all_values = str_review[start:end]
meta_lines = meta_all_values.split("\n")
for meta_line in meta_lines:
l = meta_line.split(":")
if (len(l) > 1):
(meta_name,meta_value) = (l[0],l[1])
meta_name = meta_name.replace("%","").replace("-","").strip()
meta_value = meta_value.strip()
article_entry[meta_name] = meta_value
return list_of_article_dictionaries
def get_pandas_data_frame_created_from_bibtex_file():
list_of_all_articles = get_list_of_dictionary_created_from_bibtex_file()
df = pd.DataFrame(list_of_all_articles.entries)
return df
def get_pandas_data_frame_dataset_sizes_training():
df = get_pandas_data_frame_created_from_bibtex_file()
# find wrong records
# df[df.metaTrainingSetSizes.isnull()].ID
list1 = df.metaTrainingSetSizes.str.split(",").tolist()
temp_list = []
for inner_list in list1:
for test_size in inner_list:
try:
a = int(test_size)
temp_list.append(a)
except:
pass
list2 = sorted(temp_list)
df1 =
|
pd.DataFrame(list2)
|
pandas.DataFrame
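# A minimal standalone sketch of the `pandas.DataFrame` completion above:
# wrapping a plain list of (hypothetical) training-set sizes in a one-column
# frame so that summary statistics are easy to compute.
import pandas as pd

sizes = sorted([4940, 49402, 125973, 494021])
df1 = pd.DataFrame(sizes)
print(df1.describe())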
|
import pandas as pd
from scripts.python.pheno.datasets.filter import filter_pheno, get_passed_fields
from scripts.python.pheno.datasets.features import get_column_name, get_default_statuses_ids, get_status_dict, get_default_statuses, get_sex_dict
from scripts.python.preprocessing.serialization.routines.pheno_betas_checking import get_pheno_betas_with_common_subjects
from scripts.python.routines.betas import betas_drop_na
import plotly.graph_objects as go
from scripts.python.routines.manifest import get_manifest
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.histogram import add_histogram_trace
from scripts.python.routines.plot.layout import add_layout
import json
from pathlib import Path
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
folder_name = f"proteomics"
path_save = f"{path}/meta/tasks/{folder_name}"
Path(f"{path_save}/figs").mkdir(parents=True, exist_ok=True)
tissue_datasets = {
'Brain': ['GSE74193'],
'Liver': ['GSE48325', 'GSE61258', 'GSE61446'],
'Blood': ['GSE87571']
}
target_features = ['Status', 'Age', 'Sex']
for tissue, datasets in tissue_datasets.items():
tmp_path = f"{path_save}/{tissue}"
Path(f"{tmp_path}/figs").mkdir(parents=True, exist_ok=True)
pheno_all = pd.DataFrame(columns=target_features + ['Dataset'])
pheno_all.index.name = 'subject_id'
for d_id, dataset in enumerate(datasets):
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
statuses = get_default_statuses(dataset)
status_col = get_column_name(dataset, 'Status').replace(' ', '_')
statuses_ids = get_default_statuses_ids(dataset)
status_dict = get_status_dict(dataset)
status_passed_fields = get_passed_fields(status_dict, statuses)
controls_status_vals = [status_dict['Control'][x].column for x in statuses_ids['Control']]
controls_labels = ', '.join([status_dict['Control'][x].label for x in statuses_ids['Control']])
age_col = get_column_name(dataset, 'Age').replace(' ', '_')
sex_col = get_column_name(dataset, 'Sex').replace(' ', '_')
sex_dict = get_sex_dict(dataset)
continuous_vars = {'Age': age_col}
categorical_vars = {
status_col: [x.column for x in status_passed_fields],
sex_col: [sex_dict[x] for x in sex_dict]
}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
betas = pd.read_pickle(f"{path}/{platform}/{dataset}/betas.pkl")
betas = betas_drop_na(betas)
df =
|
pd.merge(pheno, betas, left_index=True, right_index=True)
|
pandas.merge
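# A minimal standalone sketch of the `pandas.merge` completion above: an inner
# join of a phenotype table and a betas table on their shared subject index.
# The toy data below are made up.
import pandas as pd

pheno = pd.DataFrame({'Age': [34, 56]}, index=['s1', 's2'])
betas = pd.DataFrame({'cg00000029': [0.81, 0.12]}, index=['s1', 's2'])
df = pd.merge(pheno, betas, left_index=True, right_index=True)
print(df)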
|
import argparse, cv2, os, glob
import numpy as np
import pandas as pd
from distutils.util import strtobool
__all__ = ["get_parameter", "get_args", "rgb_dist", "proba_parts2joints", "figure_disappears", "ensure_dir",
"bvh_exists", "enum_train_files", "enum_test_files"]
def get_parameter(param_filename, tf=None):
param_dict = {}
if tf is None:
with open(param_filename, 'r') as fin:
for line in fin.readlines():
items = line.split(":")
try:
if items[0] == "Camera DollyZ":
param_dict[items[0]] = float(items[1]) + 287
else:
param_dict[items[0]] = float(items[1])
except ValueError:
param_dict[items[0]] = items[1].replace("\n", "")
else:
p_file = tf.extractfile(param_filename)
for line in p_file.readlines():
items = line.decode("utf-8").split(":")
try:
if items[0] == "Camera DollyZ":
param_dict[items[0]] = float(items[1]) + 287
else:
param_dict[items[0]] = float(items[1])
except ValueError:
param_dict[items[0]] = items[1].replace("\n", "")
return param_dict
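# Illustrative (commented) sketch of get_parameter's behaviour on a file with
# (hypothetical) lines such as:
#   Camera DollyZ: -287.0
#   BVH File Name: walk_01.bvh
# it would return {'Camera DollyZ': 0.0, 'BVH File Name': ' walk_01.bvh'}
# (the +287 offset is applied to DollyZ; the space after ':' is kept as-is).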
def get_args():
p = argparse.ArgumentParser()
p.add_argument("-d", "--data_path", type=str, default="../../Data/")
p.add_argument("-t", "--test_path", type=str, default="SyntheticImages/*male/")
p.add_argument("-n", "--n_train_images", type=int, default=2000)
p.add_argument("-N", "--n_test_images", type=int, default=100)
p.add_argument("-f", "--full_rotation", type=str, default="False")
p.add_argument("-D", "--discr_setting_type", type=str, default=None)
p.add_argument("-s", "--n_sep", type=int, default=1)
args = p.parse_args()
args.full_rotation = bool(strtobool(args.full_rotation))
return args
def rgb_dist(px_val, part_label):
return np.sum((px_val.astype(np.float32) - part_label.astype(np.float32))**2)**(1./2.)
def proba_parts2joints(part_proba, also_bg=False):
if also_bg:
joint_proba = np.zeros((part_proba.shape[0], 19))
joint_proba[:, 18] = part_proba[:, 31] # background
else:
joint_proba = np.zeros((part_proba.shape[0], 18))
joint_proba[:, 0] = np.sum(part_proba[:, :4], axis=1) # Head
joint_proba[:, 1] = part_proba[:, 4] # neck
joint_proba[:, 2] = np.sum(part_proba[:, 5:7], axis=1) # Chest
joint_proba[:, 3] = np.sum(part_proba[:, 7:9], axis=1) # Waist
joint_proba[:, 4:6] = part_proba[:, 9:11] # Shoulder
joint_proba[:, 6:12] = part_proba[:, 15:21] # Elbow, Wrist, Hand
joint_proba[:, 12:18] = part_proba[:, 25:31] # Knee, Ankle, Foot
return joint_proba
def figure_disappears(label_filename):
label_px = cv2.imread(label_filename)[:, :, :3][:, :, ::-1]
tmp = np.sum(label_px, axis=2)
if np.where((255 * 3 - 63 >= tmp) & (tmp >= 63))[0].shape[0] > 10:
return False
else:
return True
def ensure_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def bvh_exists(data_path, fname):
bvh_path = data_path + "Preprocessing/MotionBVH/Regularized/"
param_fname = fname + "_0_param"
params = get_parameter(param_fname)
return os.path.exists(bvh_path + params["BVH File Name"])
def enum_train_files(data_path, n_train_images, bpc_model, full_rotation):
bpc_path = data_path + "Main/BodyPartClassification/"
intermediate_path = bpc_path + "Intermediate/"
images_path = bpc_path + "SyntheticImages/"
active_idxs = [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14,
16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30,
32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46,
48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62]
train_images_order_path = intermediate_path + "input_order.csv"
if os.path.exists(train_images_order_path):
train_filenames = \
np.array([images_path + f for f in
np.array(pd.read_csv(train_images_order_path, dtype=str, header=None))]).flatten()
else:
train_filenames = np.array([images_path+"male/%05d" % i for i in range(7500)])
train_filenames = np.append(train_filenames,
np.array([images_path+"female/%05d" % i for i in range(7500)]))
np.random.seed(1)
np.random.shuffle(train_filenames)
train_filename_ids = ["/".join(f.split("/")[-2:]) for f in train_filenames]
|
pd.DataFrame(train_filename_ids)
|
pandas.DataFrame
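# A minimal standalone sketch of the `pandas.DataFrame` completion above:
# turning the list of "<gender>/<index>" image ids into a one-column frame,
# e.g. so the training order can be written back out (ids are hypothetical).
import pandas as pd

train_filename_ids = ['male/00000', 'female/00001', 'male/00002']
order_df = pd.DataFrame(train_filename_ids)
print(order_df)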
|
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
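    # Illustrative (commented) sketch of the coercion above: an int64 array such
    # as np.array([1, 2, 3]) comes back as float64, and any +/-inf entries are
    # replaced with NaN before the Cython rolling kernels see them.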
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
        If it is an offset, this will be the time period of each window. Each
        window will be variable-sized, based on the observations included in
        the time period. This is only valid for datetime-like indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
        Provide validation for the window type and return the window
        weights once they have been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
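    # Illustrative (commented) sketch, assuming scipy is installed: for
    # win_type='kaiser' with beta=4.0 and window=5, the call above becomes
    # sig.get_window(('kaiser', 4.0), 5, False), i.e. a symmetric 5-point
    # Kaiser window whose weights _apply_window then applies to the data.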
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@
|
Appender(_shared_docs["var"])
|
pandas.util._decorators.Appender
|
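# A minimal sketch (hypothetical demo_stat function, not taken from the source
# above) of how the Appender/Substitution decorators used by the rolling
# methods compose docstrings: Appender concatenates the shared text onto the
# wrapped function's __doc__, and Substitution then fills the %(name)s
# placeholders. Both are internal pandas helpers, so their exact behaviour may
# differ between pandas versions.
from textwrap import dedent
from pandas.util._decorators import Appender, Substitution

_shared_demo_doc = dedent(
    """
    Calculate the %(name)s demo statistic.
    """
)

@Substitution(name="rolling")   # applied second: fills %(name)s
@Appender(_shared_demo_doc)     # applied first: appends the shared text
def demo_stat():
    """Demo method."""

print(demo_stat.__doc__)        # prints the combined, substituted docstring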
# Data Science with SQL Server Quick Start Guide
# Chapter 05
# Imports
import numpy as np
import pandas as pd
import pyodbc
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sc
# Handling NULLs
con = pyodbc.connect('DSN=AWDW;UID=RUser;PWD=<PASSWORD>')
query = """SELECT c1, c2, c3
FROM dbo.NULLTest;"""
NULLTest = pd.read_sql(query, con)
NULLTest
# Checking for NULLs
pd.isnull(NULLTest)
# Omitting
NULLTest.dropna(axis = 'rows')
NULLTest.dropna(axis = 'columns')
# Aggregate functions
NULLTest.c2.mean()
NULLTest.c2.mean(skipna = False)
# Reading the data from SQL Server
con = pyodbc.connect('DSN=AWDW;UID=RUser;PWD=<PASSWORD>')
query = """SELECT CustomerKey, CommuteDistance,
TotalChildren, NumberChildrenAtHome,
Gender, HouseOwnerFlag,
NumberCarsOwned, MaritalStatus,
Age, YearlyIncome, BikeBuyer,
EnglishEducation AS Education,
EnglishOccupation AS Occupation
FROM dbo.vTargetMail"""
TM = pd.read_sql(query, con)
# check the Age
TM["Age"].describe()
# Generating dummies (indicators)
pd.get_dummies(TM.MaritalStatus)
pd.get_dummies(TM.MaritalStatus, prefix = 'TM')
# Create the dummies
TM1 = TM[['MaritalStatus']].join(pd.get_dummies(TM.MaritalStatus, prefix = 'TM'))
TM1.tail(3)
# Show the Age in 20 equal width bins
TM['AgeEWB'] =
|
pd.cut(TM['Age'], 20)
|
pandas.cut
|
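# A small self-contained illustration (synthetic ages, not the vTargetMail
# query above) of the equal-width binning behind the AgeEWB column: pd.cut
# splits the value range into bins of equal width, whereas pd.qcut would
# split into bins holding roughly equal numbers of rows.
import numpy as np
import pandas as pd

ages = pd.Series(np.random.default_rng(0).integers(17, 90, size=1000), name="Age")
equal_width = pd.cut(ages, 5)     # 5 bins of equal width over the age range
equal_freq = pd.qcut(ages, 5)     # 5 bins with roughly equal row counts
print(equal_width.value_counts().sort_index())
print(equal_freq.value_counts().sort_index())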
"""Evaluate GRA."""
import os
import argparse
import numpy as np
import tensorflow as tf
import pandas as pd
from pandas import Series, DataFrame
from model.gra import GRA
import config as cfg
import util.dataset as dt
def train(epoch, dataset, config, log_dir):
"""Train model for one epoch."""
model_config = config['model']
train_config = config['train']
sess_config = config['session']
with tf.Graph().as_default():
model = GRA(model_config)
model.build_inference()
model.build_loss(train_config['reg_coeff'], train_config['shu_coeff'])
model.build_train(train_config['learning_rate'])
with tf.Session(config=sess_config) as sess:
sum_dir = os.path.join(log_dir, 'summary')
# create event file for graph
if not os.path.exists(sum_dir):
summary_writer = tf.summary.FileWriter(sum_dir, sess.graph)
summary_writer.close()
summary_writer = tf.summary.FileWriter(sum_dir)
ckpt_dir = os.path.join(log_dir, 'checkpoint')
ckpt_path = tf.train.latest_checkpoint(ckpt_dir)
saver = tf.train.Saver()
if ckpt_path:
print('load checkpoint {}.'.format(ckpt_path))
saver.restore(sess, ckpt_path)
else:
print('no checkpoint.')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
sess.run(tf.global_variables_initializer())
stats_dir = os.path.join(log_dir, 'stats')
stats_path = os.path.join(stats_dir, 'train.json')
if os.path.exists(stats_path):
print('load stats file {}.'.format(stats_path))
stats =
|
pd.read_json(stats_path, 'records')
|
pandas.read_json
|
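# A minimal round-trip (hypothetical per-epoch stats, not the GRA training
# file) showing the 'records' orient used above: each row is stored as one
# JSON object, which suits appending training statistics epoch by epoch.
import io
import pandas as pd

stats = pd.DataFrame({"epoch": [1, 2], "loss": [0.91, 0.74]})
payload = stats.to_json(orient="records")              # '[{"epoch":1,...},...]'
loaded = pd.read_json(io.StringIO(payload), orient="records")
print(loaded)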
import os
import pandas as pd
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import roc_auc_score
from main import load_train_data, load_test_data, fit_tokenizer, create_padded_sequences
from main import TRAIN_DATA_PATH, TEST_DATA_PATH, MODELS_DATA_PATH, RANDOM_STATE, LABEL_COLUMNS
from utils import load_hparams_and_model, train_model_from_experiment
'''
This script should be run after an experiment has finished in the main.py file. After entering the experiment id,
all hyperparameters and the model architecture will be loaded from the experiment directory and the model will be
retrained on the whole training set. The submission file will be created and saved to the submissions directory.
If available, test set labels will be used for model evaluation, i.e. for estimating model performance on the test set.
Evaluation results will be saved to evaluations directory.
'''
SAVE_TRAINED_MODEL = True # set to True if you want to save a model from which a submission file was created
TEST_DATA_LABELS_PATH = 'test_data/test_labels.csv' # set this variable to None if test labels aren't available
def load_test_labels(file_path, label_columns):
data_frame =
|
pd.read_csv(file_path)
|
pandas.read_csv
|
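# A hedged sketch (made-up CSV text and an assumed subset of label columns,
# not the actual test_labels.csv referenced above) of the usual follow-up:
# read the labels with pd.read_csv, keep only the label columns, and drop
# rows whose labels are marked as unscored (commonly encoded as -1).
import io
import pandas as pd

LABELS = ["toxic", "severe_toxic", "obscene"]                  # assumed subset
csv_text = "id,toxic,severe_toxic,obscene\na,0,0,1\nb,-1,-1,-1\n"
labels_df = pd.read_csv(io.StringIO(csv_text))
labels_df = labels_df[labels_df[LABELS].ge(0).all(axis=1)]     # drop unscored rows
print(labels_df)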
import datetime, time
from sklearn.linear_model import LinearRegression
import numpy as np
def predictTemperature(startDate, endDate, temparature, n):
p = int(len(temparature) / 24)
x = []
for i in range(1, ((24 * p) + 1)):
x.append(i)
y = temparature
lm = LinearRegression()
lm.fit(np.asarray(x).reshape(-1, 1), y)
f = x[-1] + 1
z = []
for i in range(24 * n):
z.append(f)
f += 1
return lm.predict(np.asarray(z).reshape(-1, 1)).tolist()
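# A quick usage sketch for the hour-indexed version above (synthetic readings:
# two days of hourly temperatures, forecasting one more day). Note that this
# implementation ignores startDate/endDate and only uses the reading count.
history = [20 + (h % 24) * 0.3 for h in range(48)]   # 2 * 24 hourly readings
forecast = predictTemperature("2020-01-01", "2020-01-02", history, 1)
print(len(forecast))                                 # 24 hourly predictions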
import datetime
from sklearn.linear_model import LinearRegression
import pandas as pd
import random
# predict temperature method
def predictTemperature(startDate, endDate, temps, n):
startDate = datetime.datetime.strptime(startDate, "%Y-%m-%d")
endDate = datetime.datetime.strptime(endDate, "%Y-%m-%d")
endDate = endDate + datetime.timedelta(days=1)
dates = []
# create datetime objects between start and end dates
while startDate < endDate:
dates.append(startDate)
startDate = startDate + datetime.timedelta(hours=1)
# create 24*n test data dates
testdates = [endDate + datetime.timedelta(hours=x) for x in range(24 * n)]
X_test =
|
pd.DataFrame(testdates, columns=['datetime'])
|
pandas.DataFrame
|
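# A hedged continuation sketch (assumed feature choice, not part of the
# original snippet): datetime objects cannot be fed to LinearRegression
# directly, so a common next step is deriving numeric features such as the
# hour of day from the datetime column.
import datetime
import pandas as pd

testdates = [datetime.datetime(2020, 1, 3) + datetime.timedelta(hours=h) for h in range(24)]
X_test = pd.DataFrame(testdates, columns=["datetime"])
X_test["hour"] = X_test["datetime"].dt.hour   # numeric feature for the model
print(X_test.head())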
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
IntervalIndex,
NaT,
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesMissingData:
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = cat == cat
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected =
|
DataFrame({"A": expected})
|
pandas.DataFrame
|
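# A compact standalone illustration of the behaviour exercised by the fillna
# test above: 'pad' propagates the last valid timestamp forward across the
# NaT gap, while 'bfill' pulls the next valid timestamp backward.
import pandas as pd

s = pd.Series(pd.to_datetime(["2020-01-01", "2020-01-02", None, "2020-01-04"]))
print(s.fillna(method="pad"))     # NaT at position 2 becomes 2020-01-02
print(s.fillna(method="bfill"))   # NaT at position 2 becomes 2020-01-04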
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python3 d_speed_size.py : test speed/compression for folder 'corpus'
# python3 d_speed_size.py indir : test speed/compression for folder 'indir'
import os
import sys
import stat
import time
import shutil
import ntpath
import subprocess
import pandas as pd
def _cmp(
exe,
fnm,
lvl,
opts=' -f -k -',
):
"""
compress file 'fnm' using executable 'exe'
Parameters
----------
exe : str
name of compression executable
fnm : str
name of file to be compressed
lvl : int
compression level
opts : str
command line options for executable (default, ' -f -k -')
"""
env = os.environ
cmd = exe + opts + str(lvl) + ' "' + fnm + '"'
subprocess.call(cmd, shell=True)
def test_cmp(
exe='gzip',
indir='',
repeats=1,
ext='.gz',
opts=' -q -f -k -',
max_level=9,
):
"""
compress all files in folder 'indir' using executable 'exe'
Parameters
----------
exe : str
name of compression executable
indir : str
name of folder with files to compress
repeats : int
how many times is each file compressed. More is slower but better timing accuracy
ext : str
extension for files created by exe (default, '.gz')
opts : str
command line options for executable (default, ' -f -k -')
max_level : int
maximum compression level to test (default 9)
"""
if not os.path.exists(exe) and not shutil.which(exe):
print('Skipping test: Unable to find "' + exe + '"')
return ()
if len(indir) < 1:
indir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'corpus')
if not os.path.isdir(indir):
print('Run a_compile.py first: Unable to find "' + indir +'"')
sys.exit()
meth = ntpath.basename(exe)
print('Method\tLevel\tms\tmb/s\t%')
for lvl in range(1, max_level + 1):
t0 = time.time()
size = 0
nsize = 0
for rep in range(repeats):
for f in os.listdir(indir):
if not os.path.isfile(os.path.join(indir, f)):
continue
if f.startswith('.'):
continue
if not f.endswith('.zst') and not f.endswith('.gz') \
and not f.endswith('.bz2'):
fnm = os.path.join(indir, f)
_cmp(exe, fnm, lvl, opts)
if rep > 0:
continue
size = size + os.stat(fnm).st_size
nsize = nsize + os.stat(fnm + ext).st_size
size = size * repeats
nsize = nsize * repeats
seconds = time.time() - t0
# bytes_per_mb = 1024**2
bytes_per_mb = 1000000
speed = size / bytes_per_mb / seconds
print('{}\t{}\t{:.0f}\t{:.0f}\t{:.2f}'.format(meth, lvl,
seconds * 1000, speed, nsize / size * 100))
row_df =
|
pd.DataFrame([[meth, nsize / size * 100, speed, lvl]])
|
pandas.DataFrame
|
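# A hedged sketch (made-up numbers and assumed column names) of the usual
# follow-up to the row_df line above: collect one single-row DataFrame per
# (method, level) pair and concatenate them into one results table.
import pandas as pd

rows = [
    pd.DataFrame([["gzip", 32.1, 95.0, 6]], columns=["method", "pct", "mb_s", "level"]),
    pd.DataFrame([["zstd", 30.4, 410.0, 6]], columns=["method", "pct", "mb_s", "level"]),
]
results = pd.concat(rows, ignore_index=True)
print(results)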
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Custom pandas accessors for signals data.
Methods can be accessed as follows:
* `SignalsSRAccessor` -> `pd.Series.vbt.signals.*`
* `SignalsDFAccessor` -> `pd.DataFrame.vbt.signals.*`
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> # vectorbt.signals.accessors.SignalsAccessor.pos_rank
>>> pd.Series([False, True, True, True, False]).vbt.signals.pos_rank()
0 0
1 1
2 2
3 3
4 0
dtype: int64
```
The accessors extend `vectorbt.generic.accessors`.
!!! note
The underlying Series/DataFrame should already be a signal series.
Input arrays should be `np.bool_`.
Grouping is only supported by the methods that accept the `group_by` argument.
Accessors do not utilize caching.
Run for the examples below:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> mask = pd.DataFrame({
... 'a': [True, False, False, False, False],
... 'b': [True, False, True, False, True],
... 'c': [True, True, True, False, False]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]))
>>> mask
a b c
2020-01-01 True True True
2020-01-02 False False True
2020-01-03 False True True
2020-01-04 False False False
2020-01-05 False True False
```
## Stats
!!! hint
See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `SignalsAccessor.metrics`.
```python-repl
>>> mask.vbt.signals.stats(column='a')
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 1
Rate [%] 20
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance: Min NaT
Distance: Max NaT
Distance: Mean NaT
Distance: Std NaT
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std NaT
Partition Distance: Min NaT
Partition Distance: Max NaT
Partition Distance: Mean NaT
Partition Distance: Std NaT
Name: a, dtype: object
```
We can pass another signal array to compare this array with:
```python-repl
>>> mask.vbt.signals.stats(column='a', settings=dict(other=mask['b']))
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 1
Rate [%] 20
Total Overlapping 1
Overlapping Rate [%] 33.3333
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance -> Other: Min 0 days 00:00:00
Distance -> Other: Max 0 days 00:00:00
Distance -> Other: Mean 0 days 00:00:00
Distance -> Other: Std NaT
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std NaT
Partition Distance: Min NaT
Partition Distance: Max NaT
Partition Distance: Mean NaT
Partition Distance: Std NaT
Name: a, dtype: object
```
We can also return duration as a floating number rather than a timedelta:
```python-repl
>>> mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False))
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5
Total 1
Rate [%] 20
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance: Min NaN
Distance: Max NaN
Distance: Mean NaN
Distance: Std NaN
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1
Partition Length: Max 1
Partition Length: Mean 1
Partition Length: Std NaN
Partition Distance: Min NaN
Partition Distance: Max NaN
Partition Distance: Mean NaN
Partition Distance: Std NaN
Name: a, dtype: object
```
`SignalsAccessor.stats` also supports (re-)grouping:
```python-repl
>>> mask.vbt.signals.stats(column=0, group_by=[0, 0, 1])
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 4
Rate [%] 40
First Index 2020-01-01 00:00:00
Last Index 2020-01-05 00:00:00
Norm Avg Index [-1, 1] -0.25
Distance: Min 2 days 00:00:00
Distance: Max 2 days 00:00:00
Distance: Mean 2 days 00:00:00
Distance: Std 0 days 00:00:00
Total Partitions 4
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std 0 days 00:00:00
Partition Distance: Min 2 days 00:00:00
Partition Distance: Max 2 days 00:00:00
Partition Distance: Mean 2 days 00:00:00
Partition Distance: Std 0 days 00:00:00
Name: 0, dtype: object
```
## Plots
!!! hint
See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots` and `SignalsAccessor.subplots`.
This class inherits subplots from `vectorbt.generic.accessors.GenericAccessor`.
"""
import warnings
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.base import reshape_fns
from vectorbt.base.array_wrapper import ArrayWrapper
from vectorbt.generic import nb as generic_nb
from vectorbt.generic import plotting
from vectorbt.generic.accessors import GenericAccessor, GenericSRAccessor, GenericDFAccessor
from vectorbt.generic.ranges import Ranges
from vectorbt.records.mapped_array import MappedArray
from vectorbt.root_accessors import register_dataframe_vbt_accessor, register_series_vbt_accessor
from vectorbt.signals import nb
from vectorbt.utils import checks
from vectorbt.utils.colors import adjust_lightness
from vectorbt.utils.config import merge_dicts, Config
from vectorbt.utils.decorators import class_or_instancemethod
from vectorbt.utils.template import RepEval
__pdoc__ = {}
class SignalsAccessor(GenericAccessor):
"""Accessor on top of signal series. For both, Series and DataFrames.
Accessible through `pd.Series.vbt.signals` and `pd.DataFrame.vbt.signals`."""
def __init__(self, obj: tp.SeriesFrame, **kwargs) -> None:
checks.assert_dtype(obj, np.bool_)
GenericAccessor.__init__(self, obj, **kwargs)
@property
def sr_accessor_cls(self) -> tp.Type["SignalsSRAccessor"]:
"""Accessor class for `pd.Series`."""
return SignalsSRAccessor
@property
def df_accessor_cls(self) -> tp.Type["SignalsDFAccessor"]:
"""Accessor class for `pd.DataFrame`."""
return SignalsDFAccessor
# ############# Overriding ############# #
def bshift(self, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.generic.accessors.GenericAccessor.bshift` with `fill_value=False`."""
return GenericAccessor.bshift(self, *args, fill_value=fill_value, **kwargs)
def fshift(self, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.generic.accessors.GenericAccessor.fshift` with `fill_value=False`."""
return GenericAccessor.fshift(self, *args, fill_value=fill_value, **kwargs)
@classmethod
def empty(cls, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.base.accessors.BaseAccessor.empty` with `fill_value=False`."""
return GenericAccessor.empty(*args, fill_value=fill_value, dtype=np.bool_, **kwargs)
@classmethod
def empty_like(cls, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.base.accessors.BaseAccessor.empty_like` with `fill_value=False`."""
return GenericAccessor.empty_like(*args, fill_value=fill_value, dtype=np.bool_, **kwargs)
# ############# Generation ############# #
@classmethod
def generate(cls,
shape: tp.RelaxedShape,
choice_func_nb: tp.ChoiceFunc, *args,
pick_first: bool = False,
**kwargs) -> tp.SeriesFrame:
"""See `vectorbt.signals.nb.generate_nb`.
`**kwargs` will be passed to pandas constructor.
## Example
Generate random signals manually:
```python-repl
>>> @njit
... def choice_func_nb(from_i, to_i, col):
... return col + from_i
>>> pd.DataFrame.vbt.signals.generate((5, 3),
... choice_func_nb, index=mask.index, columns=mask.columns)
a b c
2020-01-01 True False False
2020-01-02 False True False
2020-01-03 False False True
2020-01-04 False False False
2020-01-05 False False False
```
"""
checks.assert_numba_func(choice_func_nb)
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
result = nb.generate_nb(shape, pick_first, choice_func_nb, *args)
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result[:, 0], **kwargs)
return pd.DataFrame(result, **kwargs)
@classmethod
def generate_both(cls,
shape: tp.RelaxedShape,
entry_choice_func_nb: tp.Optional[tp.ChoiceFunc] = None,
entry_args: tp.ArgsLike = None,
exit_choice_func_nb: tp.Optional[tp.ChoiceFunc] = None,
exit_args: tp.ArgsLike = None,
entry_wait: int = 1,
exit_wait: int = 1,
entry_pick_first: bool = True,
exit_pick_first: bool = True,
**kwargs) -> tp.Tuple[tp.SeriesFrame, tp.SeriesFrame]:
"""See `vectorbt.signals.nb.generate_enex_nb`.
`**kwargs` will be passed to pandas constructor.
## Example
Generate entry and exit signals one after another. Each column increments
the number of ticks to wait before placing the exit signal.
```python-repl
>>> @njit
... def entry_choice_func_nb(from_i, to_i, col, temp_idx_arr):
... temp_idx_arr[0] = from_i
... return temp_idx_arr[:1] # array with one signal
>>> @njit
... def exit_choice_func_nb(from_i, to_i, col, temp_idx_arr):
... wait = col
... temp_idx_arr[0] = from_i + wait
... if temp_idx_arr[0] < to_i:
... return temp_idx_arr[:1] # array with one signal
... return temp_idx_arr[:0] # empty array
>>> temp_idx_arr = np.empty((1,), dtype=np.int_) # reuse memory
>>> en, ex = pd.DataFrame.vbt.signals.generate_both(
... (5, 3),
... entry_choice_func_nb, (temp_idx_arr,),
... exit_choice_func_nb, (temp_idx_arr,),
... index=mask.index, columns=mask.columns)
>>> en
a b c
2020-01-01 True True True
2020-01-02 False False False
2020-01-03 True False False
2020-01-04 False True False
2020-01-05 True False True
>>> ex
a b c
2020-01-01 False False False
2020-01-02 True False False
2020-01-03 False True False
2020-01-04 True False True
2020-01-05 False False False
```
"""
checks.assert_not_none(entry_choice_func_nb)
checks.assert_not_none(exit_choice_func_nb)
checks.assert_numba_func(entry_choice_func_nb)
checks.assert_numba_func(exit_choice_func_nb)
if entry_args is None:
entry_args = ()
if exit_args is None:
exit_args = ()
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
result1, result2 = nb.generate_enex_nb(
shape,
entry_wait,
exit_wait,
entry_pick_first,
exit_pick_first,
entry_choice_func_nb, entry_args,
exit_choice_func_nb, exit_args
)
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result1[:, 0], **kwargs), pd.Series(result2[:, 0], **kwargs)
return pd.DataFrame(result1, **kwargs), pd.DataFrame(result2, **kwargs)
def generate_exits(self,
exit_choice_func_nb: tp.ChoiceFunc, *args,
wait: int = 1,
until_next: bool = True,
skip_until_exit: bool = False,
pick_first: bool = False,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.signals.nb.generate_ex_nb`.
## Example
Fill all space after signals in `mask`:
```python-repl
>>> @njit
... def exit_choice_func_nb(from_i, to_i, col, temp_range):
... return temp_range[from_i:to_i]
>>> temp_range = np.arange(mask.shape[0]) # reuse memory
>>> mask.vbt.signals.generate_exits(exit_choice_func_nb, temp_range)
a b c
2020-01-01 False False False
2020-01-02 True True False
2020-01-03 True False False
2020-01-04 True True True
2020-01-05 True False True
```
"""
checks.assert_numba_func(exit_choice_func_nb)
exits = nb.generate_ex_nb(
self.to_2d_array(),
wait,
until_next,
skip_until_exit,
pick_first,
exit_choice_func_nb,
*args
)
return self.wrapper.wrap(exits, group_by=False, **merge_dicts({}, wrap_kwargs))
# ############# Filtering ############# #
@class_or_instancemethod
def clean(cls_or_self,
*args,
entry_first: bool = True,
broadcast_kwargs: tp.KwargsLike = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeTuple[tp.SeriesFrame]:
"""Clean signals.
If one array passed, see `SignalsAccessor.first`.
If two arrays passed, entries and exits, see `vectorbt.signals.nb.clean_enex_nb`."""
if not isinstance(cls_or_self, type):
args = (cls_or_self.obj, *args)
if len(args) == 1:
obj = args[0]
if not isinstance(obj, (pd.Series, pd.DataFrame)):
wrapper = ArrayWrapper.from_shape(np.asarray(obj).shape)
obj = wrapper.wrap(obj)
return obj.vbt.signals.first(wrap_kwargs=wrap_kwargs)
elif len(args) == 2:
if broadcast_kwargs is None:
broadcast_kwargs = {}
entries, exits = reshape_fns.broadcast(*args, **broadcast_kwargs)
entries_out, exits_out = nb.clean_enex_nb(
reshape_fns.to_2d_array(entries),
reshape_fns.to_2d_array(exits),
entry_first
)
return (
ArrayWrapper.from_obj(entries).wrap(entries_out, group_by=False, **merge_dicts({}, wrap_kwargs)),
ArrayWrapper.from_obj(exits).wrap(exits_out, group_by=False, **merge_dicts({}, wrap_kwargs))
)
else:
raise ValueError("Either one or two arrays must be passed")
# ############# Random ############# #
@classmethod
def generate_random(cls,
shape: tp.RelaxedShape,
n: tp.Optional[tp.ArrayLike] = None,
prob: tp.Optional[tp.ArrayLike] = None,
pick_first: bool = False,
seed: tp.Optional[int] = None,
**kwargs) -> tp.SeriesFrame:
"""Generate signals randomly.
If `n` is set, see `vectorbt.signals.nb.generate_rand_nb`.
If `prob` is set, see `vectorbt.signals.nb.generate_rand_by_prob_nb`.
`n` should be either a scalar or an array that will broadcast to the number of columns.
`prob` should be either a single number or an array that will broadcast to match `shape`.
`**kwargs` will be passed to pandas constructor.
## Example
For each column, generate a variable number of signals:
```python-repl
>>> pd.DataFrame.vbt.signals.generate_random((5, 3), n=[0, 1, 2],
... seed=42, index=mask.index, columns=mask.columns)
a b c
2020-01-01 False False True
2020-01-02 False False True
2020-01-03 False False False
2020-01-04 False True False
2020-01-05 False False False
```
For each column and time step, pick a signal with 50% probability:
```python-repl
>>> pd.DataFrame.vbt.signals.generate_random((5, 3), prob=0.5,
... seed=42, index=mask.index, columns=mask.columns)
a b c
2020-01-01 True True True
2020-01-02 False True False
2020-01-03 False False False
2020-01-04 False False True
2020-01-05 True False True
```
"""
flex_2d = True
if not isinstance(shape, tuple):
flex_2d = False
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
flex_2d = False
shape = (shape[0], 1)
if n is not None and prob is not None:
raise ValueError("Either n or prob should be set, not both")
if n is not None:
n = np.broadcast_to(n, shape[1])
result = nb.generate_rand_nb(shape, n, seed=seed)
elif prob is not None:
prob = np.broadcast_to(prob, shape)
result = nb.generate_rand_by_prob_nb(shape, prob, pick_first, flex_2d, seed=seed)
else:
raise ValueError("At least n or prob should be set")
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return
|
pd.Series(result[:, 0], **kwargs)
|
pandas.Series
|
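# A standalone sketch (assumed shapes, no vectorbt dependency) of the wrapping
# convention used by generate_random above: a 2-D boolean result becomes a
# DataFrame, while a single column is squeezed into a Series.
import numpy as np
import pandas as pd

result = np.zeros((5, 3), dtype=np.bool_)
result[0, :] = True
index = pd.date_range("2020-01-01", periods=5)
as_frame = pd.DataFrame(result, index=index, columns=["a", "b", "c"])
as_series = pd.Series(result[:, 0], index=index, name="a")
print(as_frame.dtypes.unique(), as_series.dtype)   # both bool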
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - Economic Indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - United States - Pending Home Sales (MoM)
def macro_usa_phs():
"""
US pending home sales month-over-month rate
http://data.eastmoney.com/cjsj/foreign_0_5.html
:return: pending home sales month-over-month data
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
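# A minimal standalone illustration (made-up records, English column names)
# of the parsing pattern used in macro_usa_phs above: split each record
# string on commas, build a DataFrame, then convert the numeric columns
# with pd.to_numeric.
import pandas as pd

records = ["2021-05,1.9,2.3,2021-06-30", "2021-06,2.3,1.1,2021-07-29"]
df = pd.DataFrame([item.split(",") for item in records],
                  columns=["period", "previous", "current", "release_date"])
df["previous"] = pd.to_numeric(df["previous"])
df["current"] = pd.to_numeric(df["current"])
print(df.dtypes)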
# Jin10 Data Center - Economic Indicators - US - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
"""
US Gross Domestic Product (GDP) report; data available from 2008-02-28 to the present
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US CPI MoM Report
def macro_usa_cpi_monthly():
"""
US CPI month-over-month (MoM) report; data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_cpi
https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
:return: US CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Core CPI MoM Report
def macro_usa_core_cpi_monthly():
"""
US Core CPI MoM report; data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_cpi
https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
:return: US Core CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Personal Spending MoM Report
def macro_usa_personal_spending():
"""
US Personal Spending MoM report; data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_personal_spending
https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
:return: US Personal Spending MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Retail Sales MoM Report
def macro_usa_retail_sales():
"""
US Retail Sales MoM report; data available from 1992-03-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_retail_sales
https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
:return: US Retail Sales MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Import Price Index Report
def macro_usa_import_price():
"""
US Import Price Index report; data available from 1989-02-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_import_price
https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
:return: US Import Price Index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Price Level-US Export Price Index Report
def macro_usa_export_price():
    """
    US Export Price Index Report, data available from 19890201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_export_price
    https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
    :return: US Export Price Index Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Labor Market-LMCI
def macro_usa_lmci():
    """
    Fed Labor Market Conditions Index (LMCI) Report, data available from 20141006 to the present
    https://datacenter.jin10.com/reportType/dc_usa_lmci
    https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
    :return: Fed Labor Market Conditions Index (LMCI) Report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Labor Market-Unemployment Rate-US Unemployment Rate Report
def macro_usa_unemployment_rate():
    """
    US Unemployment Rate Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
    https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
    :return: US Unemployment Rate Report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
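# Usage sketch (illustrative only, not part of the published interface): the Series
# returned by macro_usa_unemployment_rate is indexed by release date, so ordinary
# pandas grouping works directly, e.g. collapsing the monthly readings into
# calendar-year averages.
def _demo_usa_unemployment_yearly_mean():
    """Hypothetical example: average the monthly US unemployment rate by calendar year."""
    unemployment = macro_usa_unemployment_rate()
    return unemployment.groupby(unemployment.index.year).mean()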
# Jin10 Data Center-Economic Indicators-USA-Labor Market-Unemployment Rate-US Challenger Job Cuts Report
def macro_usa_job_cuts():
    """
    US Challenger Job Cuts Report, data available from 19940201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_job_cuts
    https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
    :return: US Challenger Job Cuts Report - current value (10k persons)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Labor Market-Employment-US Nonfarm Payrolls Report
def macro_usa_non_farm():
    """
    US Nonfarm Payrolls Report, data available from 19700102 to the present
    https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
    https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
    :return: US Nonfarm Payrolls Report - current value (10k persons)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Labor Market-Employment-US ADP Employment Report
def macro_usa_adp_employment():
    """
    US ADP Employment Report, data available from 20010601 to the present
    https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
    https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
    :return: US ADP Employment Report - current value (10k persons)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
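# Usage sketch (illustrative only, not part of the published interface): the ADP and
# nonfarm payrolls series share the same unit (10k persons), so they can be aligned
# on their release dates for a side-by-side comparison.
def _demo_usa_adp_vs_non_farm():
    """Hypothetical example: align ADP employment and nonfarm payrolls in one DataFrame."""
    compare_df = pd.concat(
        {"adp": macro_usa_adp_employment(), "non_farm": macro_usa_non_farm()},
        axis=1,
    )
    # release dates differ between the two reports, so only drop rows where both are missing
    return compare_df.dropna(how="all")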
# Jin10 Data Center-Economic Indicators-USA-Labor Market-Consumer Income and Spending-US Core PCE Price Index YoY Report
def macro_usa_core_pce_price():
    """
    US Core PCE Price Index YoY Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
    https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
    :return: US Core PCE Price Index YoY Report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Labor Market-Consumer Income and Spending-US Real Personal Consumption Expenditures QoQ (preliminary) Report
def macro_usa_real_consumer_spending():
    """
    US Real Personal Consumption Expenditures QoQ (preliminary) Report, data available from 20131107 to the present
    https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
    https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
    :return: US Real Personal Consumption Expenditures QoQ (preliminary) Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Trade-US Trade Balance Report
def macro_usa_trade_balance():
    """
    US Trade Balance Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_trade_balance
    https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
    :return: US Trade Balance Report - current value (100 million USD)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_trade_balance"
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Trade-US Current Account Report
def macro_usa_current_account():
    """
    US Current Account Report, data available from 20080317 to the present
    https://datacenter.jin10.com/reportType/dc_usa_current_account
    https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012
    :return: US Current Account Report - current value (100 million USD)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国经常账报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "12",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_current_account"
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Manufacturing-Baker Hughes Rig Count Report
def macro_usa_rig_count():
    """
    Baker Hughes Rig Count Report, data available from 20080317 to the present
    https://datacenter.jin10.com/reportType/dc_rig_count_summary
    https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203
    :return: Baker Hughes Rig Count Report - current week
    :rtype: pandas.DataFrame
    """
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/baker.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
big_df = pd.DataFrame()
big_df["钻井总数_钻井数"] = temp_df["钻井总数"].apply(lambda x: x[0])
big_df["钻井总数_变化"] = temp_df["钻井总数"].apply(lambda x: x[1])
big_df["美国石油钻井_钻井数"] = temp_df["美国石油钻井"].apply(lambda x: x[0])
big_df["美国石油钻井_变化"] = temp_df["美国石油钻井"].apply(lambda x: x[1])
big_df["混合钻井_钻井数"] = temp_df["混合钻井"].apply(lambda x: x[0])
big_df["混合钻井_变化"] = temp_df["混合钻井"].apply(lambda x: x[1])
big_df["美国天然气钻井_钻井数"] = temp_df["美国天然气钻井"].apply(lambda x: x[0])
big_df["美国天然气钻井_变化"] = temp_df["美国天然气钻井"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
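# Usage sketch (illustrative only, not part of the published interface): unlike the
# other fetchers in this module, macro_usa_rig_count returns a wide DataFrame with a
# rig-count column and a week-over-week change column per rig category.
def _demo_usa_oil_rig_share():
    """Hypothetical example: share of all active rigs that are US oil rigs."""
    rigs = macro_usa_rig_count()
    return rigs["美国石油钻井_钻井数"] / rigs["钻井总数_钻井数"]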
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Manufacturing-US Personal Spending MoM Report
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Manufacturing-US Producer Price Index (PPI) Report
def macro_usa_ppi():
    """
    US Producer Price Index (PPI) Report, data available from 20080226 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ppi
    https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628
    :return: US Producer Price Index (PPI) Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "37",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Manufacturing-US Core Producer Price Index (PPI) Report
def macro_usa_core_ppi():
    """
    US Core Producer Price Index (PPI) Report, data available from 20080318 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_ppi
    https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709
    :return: US Core Producer Price Index (PPI) Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "7",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Manufacturing-US API Crude Oil Inventory Report
def macro_usa_api_crude_stock():
    """
    US API Crude Oil Inventory Report, data available from 20120328 to the present
    https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock
    https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859
    :return: US API Crude Oil Inventory Report - current value (10k barrels)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国API原油库存报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "69",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_api_crude_stock"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Manufacturing-US Markit Manufacturing PMI Flash Report
def macro_usa_pmi():
    """
    US Markit Manufacturing PMI Flash Report, data available from 20120601 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969
    :return: US Markit Manufacturing PMI Flash Report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "74",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Manufacturing-US ISM Manufacturing PMI Report
def macro_usa_ism_pmi():
    """
    US ISM Manufacturing PMI Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071
    :return: US ISM Manufacturing PMI Report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
return temp_df
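# Usage sketch (illustrative only, not part of the published interface): PMI readings
# are diffusion indexes, so the conventional expansion/contraction threshold is 50.
def _demo_usa_ism_pmi_expansion():
    """Hypothetical example: flag months in which the ISM manufacturing PMI signals expansion."""
    ism = macro_usa_ism_pmi()
    return ism > 50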
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Industry-US Industrial Production MoM Report
def macro_usa_industrial_production():
    """
    US Industrial Production MoM Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_industrial_production
    https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188
    :return: US Industrial Production MoM Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工业产出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "20",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_industrial_production"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Industry-US Durable Goods Orders MoM Report
def macro_usa_durable_goods_orders():
    """
    US Durable Goods Orders MoM Report, data available from 20080227 to the present
    https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders
    https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295
    :return: US Durable Goods Orders MoM Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国耐用品订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "13",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_durable_goods_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Industry-US Factory Orders MoM Report
def macro_usa_factory_orders():
    """
    US Factory Orders MoM Report, data available from 19920401 to the present
    https://datacenter.jin10.com/reportType/dc_usa_factory_orders
    https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385
    :return: US Factory Orders MoM Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工厂订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "16",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_factory_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Services-US Markit Services PMI Flash Report
def macro_usa_services_pmi():
    """
    US Markit Services PMI Flash Report, data available from 20120701 to the present
    https://datacenter.jin10.com/reportType/dc_usa_services_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503
    :return: US Markit Services PMI Flash Report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit服务业PMI初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "89",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_services_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Services-US Business Inventories MoM Report
def macro_usa_business_inventories():
    """
    US Business Inventories MoM Report, data available from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_business_inventories
    https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618
    :return: US Business Inventories MoM Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国商业库存月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "4",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_business_inventories"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Services-US ISM Non-Manufacturing PMI Report
def macro_usa_ism_non_pmi():
    """
    US ISM Non-Manufacturing PMI Report, data available from 19970801 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693
    :return: US ISM Non-Manufacturing PMI Report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Real Estate-US NAHB Housing Market Index Report
def macro_usa_nahb_house_market_index():
    """
    US NAHB Housing Market Index Report, data available from 19850201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index
    https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817
    :return: US NAHB Housing Market Index Report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NAHB房产市场指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "31",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_nahb_house_market_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Real Estate-US Housing Starts (annualized) Report
def macro_usa_house_starts():
    """
    US Housing Starts (annualized) Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_starts
    https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v=1578747388
    :return: US Housing Starts (annualized) Report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋开工总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "17",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_starts"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Real Estate-US New Home Sales (annualized) Report
def macro_usa_new_home_sales():
    """
    US New Home Sales (annualized) Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_new_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v=1578747501
    :return: US New Home Sales (annualized) Report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "32",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_new_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Real Estate-US Building Permits Report
def macro_usa_building_permits():
    """
    US Building Permits Report, data available from 20080220 to the present
    https://datacenter.jin10.com/reportType/dc_usa_building_permits
    https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v=1578747599
    :return: US Building Permits Report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国营建许可总数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "3",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_building_permits"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Real Estate-US Existing Home Sales (annualized) Report
def macro_usa_exist_home_sales():
    """
    US Existing Home Sales (annualized) Report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_exist_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v=1578747703
    :return: US Existing Home Sales (annualized) Report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "15",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_exist_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center-Economic Indicators-USA-Industry Indicators-Real Estate-US FHFA House Price Index MoM Report
def macro_usa_house_price_index():
    """
    US FHFA House Price Index MoM Report, data available from 19910301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_price_index
    https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v=1578747781
    :return: US FHFA House Price Index MoM Report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国FHFA房价指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "51",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_price_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US S&P/CS 20-City Home Price Index YoY Report
def macro_usa_spcs20():
"""
US S&P/CS 20-city home price index YoY report, data range from 20010201 to the present
https://datacenter.jin10.com/reportType/dc_usa_spcs20
https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v=1578747873
:return: US S&P/CS 20-city home price index YoY report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国S&P/CS20座大城市房价指数年率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "52",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_spcs20"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Pending Home Sales Index MoM Report
def macro_usa_pending_home_sales():
"""
US pending home sales index MoM report, data range from 20010301 to the present
https://datacenter.jin10.com/reportType/dc_usa_pending_home_sales
https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v=1578747959
:return: US pending home sales index MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋签约销售指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index =
|
pd.to_datetime(date_list)
|
pandas.to_datetime
|
import argparse
import json
import logging
import os
import sys
from functools import partial
import fiona
import geopandas as gpd
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from hiector.utils.aws_utils import LocalFile, get_filesystem
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [stdout_handler]
logging.basicConfig(
level=logging.INFO, format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s", handlers=handlers
)
LOGGER = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Compute normalization factors")
parser.add_argument("--config", type=str, help="Path to config file with execution parameters", required=True)
args = parser.parse_args()
statistic_mapping = {
"mean": np.mean,
"median": np.median,
"min": np.min,
"max": np.max,
"perc1": partial(np.percentile, q=1),
"perc5": partial(np.percentile, q=5),
"perc95": partial(np.percentile, q=95),
"perc99": partial(np.percentile, q=99),
}
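# Each entry of statistic_mapping is a callable that reduces an array to a scalar; the
# percentile entries are pre-bound with functools.partial. Illustration only (not part of
# the pipeline): statistic_mapping["perc5"](np.arange(100)) evaluates to 4.95 under
# numpy's default linear interpolation.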
def compute_norm_stats(config):
filesystem = get_filesystem(config["bucket_name"], config["aws_profile"])
LOGGER.info("Opening the file descriptor the the samples file.")
with LocalFile(config["samples_file"], mode="r", filesystem=filesystem) as f:
layers = fiona.listlayers(f.path)
layers_to_read = [x for x in layers if int(x.split("_")[1]) in config["scales"]]
LOGGER.info(f"Reading and concatenating layers: {layers_to_read}")
gdf = pd.concat([gpd.read_file(f.path, layer=layer) for layer in layers_to_read])
if "query" in config:
gdf = gdf.query(config["query"])
gdf = gdf.sample(frac=config["fraction"], replace=False)
sampled = []
LOGGER.info("Sampling images...")
for image_name in tqdm(gdf.NAME.values):
imgpath = os.path.join(config["data_dir"], "images", f"{image_name}.npy")
imgs = np.load(filesystem.openbin(imgpath, "rb"))
sampled.append(imgs[np.newaxis, ...])
sampled = np.concatenate(sampled)
rows = []
LOGGER.info("Calculating statistics...")
for (
statistic_name,
statistic_f,
) in statistic_mapping.items():
rows.append(
{
"modality": config["modality"],
"statistic": statistic_name,
"B": statistic_f(sampled[..., 0]),
"G": statistic_f(sampled[..., 1]),
"R": statistic_f(sampled[..., 2]),
"N": statistic_f(sampled[..., 3]),
}
)
rows =
|
pd.DataFrame(rows)
|
pandas.DataFrame
|
""" Initial setup of the brightway2 project.
Creates project, imports databases and generates some useful data.
Should be run first.
"""
from pathlib import Path
import pickle
import json
import numpy as np
import pandas as pd
from brightway2 import *
from .utils import missing_useful_files, _check_result_dir
def setup_project(project_name, database_name, result_dir,
database_dir=None, overwrite_project=False,
overwrite_database=False, save_det_lci=True,
force_write_common_files=False,
default_bw2setup=True):
""" Create project, import databases and generate common files as required
Parameters
-----------
project_name : str
Name of the brightway2 project in which to import data. If project does
not exist, it will be created.
database_name : str
Name of the existing LCI database or to give to the LCI database being
imported.
result_dir : str
Path to the directory where data used or generated by bw2preagg is saved.
database_dir : str, default=None
Path to directory with ecoSpold2 data to be imported, None if LCI
database is not to be imported (i.e. if it exists already).
overwrite_project : bool, default=False
If True, then the existing project with the name project_name is deleted
first, and all data is reimported for a clean slate import.
overwrite_database : bool, default=False
If True, then the existing LCI database with name database_name in the
brightway2 project is deleted first, and LCI data is reimported
force_write_common_files : bool, default=False
If True, then the common files are generated even if they already exist
at given location
save_det_lci : bool, default=True
If True, deterministic LCI arrays are saved in the deterministic subfolder
of the result_dir
default_bw2setup: bool, default=True
If True, run bw2setup to include default elementary flows and LCIA methods
Returns
-------
None
"""
# Delete project on demand
if overwrite_project and project_name in projects:
print("Deleting preexisting project")
projects.delete_project(project_name, delete_dir=True)
# Create new project or switch to existing project
projects.set_current(project_name)
# Setup new project, if necessary
if default_bw2setup:
bw2setup()
# Import LCI database, if necessary
if not overwrite_database and database_name in databases:
print("Importing of {} not necessary".format(database_name))
pass
else:
importer = _prepare_import(database_dir, database_name)
print("Importing {}".format(database_name))
if database_name in databases and overwrite_database:
print("Deleting {}".format(database_name))
Database(database_name).delete()
Database(database_name).deregister()
importer.write_database()
result_dir = Path(result_dir)
result_dir.mkdir(parents=True, exist_ok=True)
# Generate common data
if missing_useful_files(result_dir) \
or force_write_common_files\
or save_det_lci:
sacrificial_lca = get_sacrificial_LCA(database_name)
if missing_useful_files(result_dir) or force_write_common_files:
_generate_common_files(result_dir, database_name, sacrificial_lca)
if save_det_lci:
with open(result_dir / 'common_files' / 'ordered_activity_codes.json', "r") as f:
activity_codes = json.load(f)
_save_det_lci(result_dir, activity_codes, database_name, sacrificial_lca)
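# Illustrative call with hypothetical names and paths, shown only to make the parameters
# above concrete (not taken from the package documentation):
#   setup_project(project_name="ei_preagg", database_name="ecoinvent_3",
#                 result_dir="/tmp/preagg_results",
#                 database_dir="/path/to/ecospold2/datasets")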
def _prepare_import(database_dir, database_name):
"""Check ecoSpolds can be imported
Returns SingleOutputEcospold2Importer with strategies applied it is possible
to write the data
Will raise an error if something prevents it.
"""
if not database_name:
raise ValueError(
"Cannot overwrite LCI database without database_name"
)
if not Path(database_dir).is_dir():
raise ValueError(
"database_dir does not exist, cannot import LCI data"
)
db_importer = SingleOutputEcospold2Importer(database_dir, database_name)
db_importer.apply_strategies()
if not db_importer.statistics()[2] == 0:
raise ValueError(
"{} unlinked exchanges when trying to import database".format(
db_importer.statistics()[2]
)
)
return db_importer
def _generate_common_files(result_dir, database_name, sacrificial_lca):
"""Generate and save common files used in subsequent steps"""
print("\nGenerating common files")
common_files_dir = Path(result_dir)/'common_files'
common_files_dir.mkdir(exist_ok=True, parents=True)
# Activity codes
db = Database(database_name)
activity_codes = [act.key[1] for act in db]
activity_codes.sort()
with open(common_files_dir/'ordered_activity_codes.json', "w") as f:
json.dump(activity_codes, f, indent=4)
sacrificial_lca.lci()
# Save various attributes for eventual reuse in interpretation
# LCA dicts, used to identify matrix coordinates
with open(common_files_dir/'product_dict.pickle', "wb") as f:
pickle.dump(sacrificial_lca.product_dict, f)
with open(common_files_dir/'bio_dict.pickle', "wb") as f:
pickle.dump(sacrificial_lca.biosphere_dict, f)
with open(common_files_dir/'activity_dict.pickle', "wb") as f:
pickle.dump(sacrificial_lca.activity_dict, f)
# A matrix values, as coo
with open(common_files_dir / "A_as_coo_scipy.pickle", "wb") as f:
pickle.dump(sacrificial_lca.technosphere_matrix.tocoo(), f)
df = pd.DataFrame(
columns=['row', 'col', 'value'],
data=np.concatenate(
[
sacrificial_lca.technosphere_matrix.tocoo().row.reshape(-1, 1),
sacrificial_lca.technosphere_matrix.tocoo().col.reshape(-1, 1),
sacrificial_lca.technosphere_matrix.tocoo().data.reshape(-1, 1)
], axis=1)
)
df.to_excel(common_files_dir / "A_as_coo.xlsx")
# B matrix values, as coo
with open(common_files_dir / "B_as_coo_scipy.pickle", "wb") as f:
pickle.dump(sacrificial_lca.biosphere_matrix.tocoo(), f)
df = pd.DataFrame(
columns=['row', 'col', 'value'],
data=np.concatenate(
[
sacrificial_lca.biosphere_matrix.tocoo().row.reshape(-1, 1),
sacrificial_lca.biosphere_matrix.tocoo().col.reshape(-1, 1),
sacrificial_lca.biosphere_matrix.tocoo().data.reshape(-1, 1)
], axis=1)
)
df.to_excel(common_files_dir / "B_as_coo.xlsx")
# A row and col descriptions
df = pd.DataFrame(columns=[
'index',
'activity name',
'location',
'ecoinvent activity uuid',
'activity brightway2 code',
'reference product name',
'reference product amount',
'reference product unit',
'ecoinvent product uuid',
'CPC',
'EcoSpold01Categories',
'ISIC rev.4 ecoinvent',
]
)
db_loaded = Database(database_name).load()
rev_product_dict = {v:k for k, v in sacrificial_lca.product_dict.items()}
for index in rev_product_dict:
assert sacrificial_lca.activity_dict[rev_product_dict[index]]==index
act_key = rev_product_dict[index]
act = db_loaded[act_key]
classifications = {c[0]: c[1] for c in act['classifications']}
data = [
index,
act['name'],
act['location'],
act['filename'][0:36],
act_key[1],
act['reference product'],
act['production amount'],
act['unit'],
act['filename'][37: 37+36],
classifications.get('CPC', ''),
classifications.get('EcoSpold01Categories', ''),
classifications.get('ISIC rev.4 ecoinvent', ''),
]
df.loc[index] = data
df = df.set_index('index')
df.to_excel(common_files_dir / "technosphere_description.xlsx")
db_loaded = None
df = None
# B row and col descriptions
df = pd.DataFrame(columns=[
'index',
'name',
'unit',
'compartment',
'subcompartment',
'type',
'ecoinvent uuid',
'brightway2 code',
]
)
db_loaded = Database('biosphere3').load()
rev_bio_dict = {v:k for k, v in sacrificial_lca.biosphere_dict.items()}
for index in rev_bio_dict:
act_key = rev_bio_dict[index]
act = db_loaded[act_key]
cats = act['categories']
subcat = cats[1] if len(cats) == 2 else ""
data = [
index,
act['name'],
act['unit'],
cats[0],
subcat,
act['type'],
act['code'],
act_key[1]
]
df.loc[index] = data
df = df.set_index('index')
df.to_excel(common_files_dir / "biosphere_description.xlsx")
db_loaded = None
df = None
# CFs
good_methods = [m for m in methods if "obsolete" not in str(m)]
cfs = np.zeros(shape=(len(sacrificial_lca.biosphere_dict), len(good_methods)))
for i, m in enumerate(good_methods):
sacrificial_lca.switch_method(m)
cfs[:, i] = sacrificial_lca.characterization_matrix.sum(axis=1).ravel()
np.save(str(common_files_dir / "cfs.npy"), cfs)
df =
|
pd.DataFrame(columns=good_methods, data=cfs)
|
pandas.DataFrame
|
# This script preps the data for a paper on the impact of COVID on ultramarathoning
# Importing required modules
import pandas as pd
import numpy as np
import datetime
# Specifying your username
username = ''
# Reading in the race data and results data
data =
|
pd.read_csv('C:/Users/' + username + '/Documents/Data/ultraCOVID/raw_results_data.csv')
|
pandas.read_csv
|
import pandas as pd
import pybedtools
import xarray as xr
import numpy as np
import dask
import warnings
import joblib
import subprocess
import pathlib
import yaml
import pyBigWig
from pybedtools import BedTool
from concurrent.futures import ProcessPoolExecutor, as_completed
from .region_ds_utilities import update_region_ds_config
from .utilities import determine_engine, obj_to_str, write_ordered_chunks
import os
from ALLCools.utilities import parse_chrom_size
os.environ["NUMEXPR_MAX_THREADS"] = "16"
def _bigwig_over_bed(bed: pd.DataFrame, path, value_type="mean", dtype="float32"):
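# Descriptive note: `bed` is expected to be a DataFrame whose first three columns are
# chrom, start and end; each row is scored with the bigwig summary statistic named by
# `value_type` (mean by default), and NaN is returned for regions pyBigWig cannot score.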
with pyBigWig.open(path, "r") as bw:
def _region_stat(row, t=value_type):
chrom, start, end, *_ = row
try:
value = bw.stats(chrom, start, end, type=t)[0]
except RuntimeError:
# happens when the region is malformed or the chrom does not exist in the bigwig;
# let the caller decide what to do, just return NaN here
value = np.NaN
return value
values = bed.apply(_region_stat, t=value_type, axis=1)
values = values.astype(dtype)
return values
def _region_bed_sorted(bed_path, g, bed_sorted):
chrom_sizes = parse_chrom_size(g)
bed_df = pd.read_csv(bed_path, sep="\t", index_col=None, header=None)
# select chroms that exist in g
bed_df = bed_df.loc[bed_df.iloc[:, 0].isin(chrom_sizes.keys())]
bed = BedTool.from_dataframe(bed_df)
if bed_sorted:
return bed
else:
return bed.sort(g=g)
def _bed_intersection(bed: pybedtools.BedTool, path, g, region_index, bed_sorted):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
query_bed = _region_bed_sorted(path, g, bed_sorted)
try:
df = bed.intersect(
query_bed, wa=True, f=0.2, g=g, sorted=True
).to_dataframe()
if df.shape[0] == 0:
regions_idx = pd.Series([])
else:
regions_idx = df["name"]
except pd.errors.EmptyDataError:
regions_idx = pd.Series([])
regions = pd.Index(regions_idx.values)
bool_series = pd.Series(region_index.isin(regions), index=region_index)
query_bed.delete_temporary_history(ask=False)
return bool_series
def _annotate_by_bigwigs_worker(
dataset_path,
region_dim,
chrom_size_path,
track_paths,
output_path,
dim,
slop,
value_type,
dtype,
**kwargs,
):
len(kwargs)
# set dask scheduler to allow multiprocessing
with dask.config.set(scheduler="sync"):
# Open region ds again inside the worker function
region_ds = RegionDS.open(
path=dataset_path, region_dim=region_dim, chrom_size_path=chrom_size_path
)
# get dmr region bed and bigwig files
dmr_bed = region_ds.get_bed(
with_id=False, bedtools=False, slop=slop, chrom_size_path=chrom_size_path
)
# iterate each bigwig
total_values = {}
for sample, bigwig_path in track_paths.items():
values = _bigwig_over_bed(
bed=dmr_bed, path=bigwig_path, value_type=value_type, dtype=dtype
)
total_values[sample] = values
total_values = pd.DataFrame(total_values)
total_values.columns.name = dim
total_values.index.name = region_dim
ds = xr.Dataset({f"{region_dim}_{dim}_da": total_values})
ds.to_zarr(output_path, mode="w")
return output_path
def _annotate_by_beds_worker(
dataset_path,
region_dim,
chrom_size_path,
slop,
track_paths,
dtype,
dim,
output_path,
bed_sorted,
**kwargs,
):
len(kwargs)
# set dask scheduler to allow multiprocessing
with dask.config.set(scheduler="sync"):
# Open region ds again inside the worker function
region_ds = RegionDS.open(
path=dataset_path, region_dim=region_dim, chrom_size_path=chrom_size_path
)
# get dmr region bed
dmr_bed = region_ds.get_bed(
with_id=True, bedtools=True, slop=slop, chrom_size_path=chrom_size_path
).sort(g=chrom_size_path)
total_values = {}
for sample, bed_path in track_paths.items():
values = _bed_intersection(
bed=dmr_bed,
path=bed_path,
bed_sorted=bed_sorted,
g=chrom_size_path,
region_index=region_ds.get_index(region_ds.region_dim),
)
total_values[sample] = values.astype(dtype)
total_values = pd.DataFrame(total_values)
total_values.columns.name = dim
ds = xr.Dataset({f"{region_dim}_{dim}_da": total_values})
ds.to_zarr(output_path, mode="w")
dmr_bed.delete_temporary_history(ask=False)
return output_path
def _fisher_exact(row, alternative="two-sided"):
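# Descriptive note: each `row` is expected to hold exactly four counts, reshaped row-major
# into the 2x2 contingency table [[a, b], [c, d]]; scipy's fisher_exact then returns the
# odds ratio and p-value for that table.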
from scipy.stats import fisher_exact
oddsratio, p = fisher_exact(row.values.reshape((2, 2)), alternative=alternative)
value = pd.Series({"oddsratio": oddsratio, "p": p})
return value
class RegionDS(xr.Dataset):
__slots__ = ()
def __init__(self, dataset, region_dim=None, location=None, chrom_size_path=None):
super().__init__(
data_vars=dataset.data_vars, coords=dataset.coords, attrs=dataset.attrs
)
self.region_dim = region_dim
self.location = location
self.chrom_size_path = chrom_size_path
return
@property
def region_dim(self):
return self.attrs.get("region_dim")
@region_dim.setter
def region_dim(self, region_dim):
if region_dim is not None:
if region_dim not in self.dims:
raise KeyError(
f"{region_dim} does not occur in dimension names: {list(self.dims.keys())}"
)
self.attrs["region_dim"] = region_dim
else:
return
@property
def chrom_size_path(self):
return self.attrs.get("chrom_size_path")
@chrom_size_path.setter
def chrom_size_path(self, chrom_size_path):
if chrom_size_path is not None:
chrom_size_path = pathlib.Path(chrom_size_path).absolute()
if not chrom_size_path.exists():
raise FileNotFoundError(str(chrom_size_path))
self.attrs["chrom_size_path"] = str(chrom_size_path)
else:
return
@property
def location(self):
return self.attrs.get("region_ds_location")
@location.setter
def location(self, path):
if path is not None:
location = pathlib.Path(path).absolute()
self.attrs["region_ds_location"] = str(location)
location.mkdir(exist_ok=True, parents=True)
else:
return
@classmethod
def from_bed(
cls, bed, location, chrom_size_path, region_dim="region", sort_bed=True
):
"""
Create empty RegionDS from a bed file.
Parameters
----------
bed : str, pathlib.Path or pandas.DataFrame
Path to a BED file, or an already loaded DataFrame, with 3 columns
(chrom, start, end) or 4 columns (chrom, start, end, name).
location : str or pathlib.Path
Directory in which the RegionDS will be stored.
region_dim : str, default="region"
Name of the region dimension.
chrom_size_path : str or pathlib.Path
Path to a chrom sizes file, used to sort the bed and stored on the RegionDS.
sort_bed : bool, default=True
Whether to sort the bed by chrom_size_path before loading.
Returns
-------
RegionDS
"""
# sort bed based on chrom_size_path
if isinstance(bed, (str, pathlib.PosixPath)):
if sort_bed:
bed = BedTool(bed).sort(g=chrom_size_path).to_dataframe()
else:
bed = BedTool(bed).to_dataframe()
else:
bed = bed
n_cols = bed.shape[1]
if n_cols == 3:
bed.index = bed.index.map(lambda i: f"{region_dim}_{i}")
elif n_cols == 4:
bed.set_index(bed.columns[3], inplace=True)
else:
raise ValueError(
"bed file need to be either 3 columns (chrom, start, end) "
"or 4 columns (chrom, start, end, name)"
)
bed.index.name = region_dim
bed.columns = ["chrom", "start", "end"]
ds = xr.Dataset({})
region_dim = bed.index.name
for k, v in bed.items():
key = f"{region_dim}_{k}"
ds.coords[key] = v
if ds.coords[key].dtype == "object":
ds.coords[key] = ds.coords[key].astype(str)
location = pathlib.Path(location).absolute()
location.mkdir(exist_ok=True, parents=True)
region_ds = cls(
ds,
region_dim=region_dim,
location=location,
chrom_size_path=chrom_size_path,
)
region_ds.save()
return region_ds
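# Illustrative call with hypothetical file paths (a sketch of the signature above, not an
# example from the package documentation):
#   region_ds = RegionDS.from_bed("dmr.bed", location="my_region_ds/",
#                                 chrom_size_path="mm10.chrom.sizes", region_dim="dmr")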
@classmethod
def open(
cls,
path,
region_dim=None,
use_regions=None,
split_large_chunks=True,
chrom_size_path=None,
select_dir=None,
engine="zarr",
):
if isinstance(path, (str, pathlib.PosixPath)):
_path = pathlib.Path(path).absolute()
if _path.is_dir():
# check if this is a RegionDS dir that contains multiple datasets
region_ds_file = _path / ".ALLCools"
if region_ds_file.exists():
with open(region_ds_file) as f:
region_ds_config = yaml.load(f, yaml.SafeLoader)
if region_dim is None:
region_dim = region_ds_config["region_dim"]
print(f"Using {region_dim} as region_dim")
# only open datasets having the region_dim
# other datasets will not be opened
# e.g, when region_dim == 'dmr', dms dataset will not be opened.
exclude_dir_name = [
k
for k, v in region_ds_config["ds_region_dim"].items()
if v != region_dim
]
# add output_dir
region_ds_location = pathlib.Path(path).absolute()
# add chrom size path if exist
if chrom_size_path is None:
chrom_size_path = _path / "chrom_sizes.txt"
if not chrom_size_path.exists():
chrom_size_path = None
# read all sub dir as a RegionDS, then merge all the RegionDS together.
datasets = []
for sub_dir_path in _path.iterdir():
if sub_dir_path.is_dir() and sub_dir_path.name[0] != ".":
if select_dir is not None:
if sub_dir_path.name not in select_dir:
# not in select_dir list, skip
continue
if sub_dir_path.name in exclude_dir_name:
# the dir does not have region_dim, skip
continue
if not (sub_dir_path / ".zattrs").exists():
# print(f'{sub_dir_path} does not seem to be a zarr storage, skipped')
continue
try:
datasets.append(
cls._open_single_dataset(
path=sub_dir_path,
region_dim=region_dim,
split_large_chunks=split_large_chunks,
engine=engine,
)
)
except BaseException as e:
print(f"An error raised when reading {sub_dir_path}.")
raise e
region_ds = cls(
xr.merge(datasets),
region_dim=region_dim,
location=region_ds_location,
chrom_size_path=chrom_size_path,
)
else:
# is dir, but is not RegionDS dir, could be just a zarr dir
region_ds = cls._open_single_dataset(
path=path,
region_dim=region_dim,
split_large_chunks=split_large_chunks,
chrom_size_path=chrom_size_path,
engine=engine,
)
else:
# dataset stored in other format, such as netcdf4
region_ds = cls._open_single_dataset(
path=path,
region_dim=region_dim,
split_large_chunks=split_large_chunks,
chrom_size_path=chrom_size_path,
engine=engine,
)
else:
# could be a list of paths, open it as a single dataset
path = list(path)
region_ds = cls._open_single_dataset(
path=path,
region_dim=region_dim,
split_large_chunks=split_large_chunks,
chrom_size_path=chrom_size_path,
engine=engine,
)
if use_regions is not None:
region_ds = region_ds.sel({region_dim: use_regions})
return region_ds
@classmethod
def _open_single_dataset(
cls,
path,
region_dim,
split_large_chunks=True,
chrom_size_path=None,
location=None,
engine=None,
):
"""
Take one or multiple RegionDS file paths and create single RegionDS concatenated on region_dim
Parameters
----------
path
Single RegionDS path or RegionDS path pattern with wildcard or RegionDS path list
region_dim
Dimension name of regions
split_large_chunks
Split large dask array chunks if true
Returns
-------
RegionDS
"""
# print('opening', path)
if region_dim is None:
raise ValueError(
"Please specify a region_dim name when open a normal xr.Dataset with RegionDS."
)
if engine is None:
engine = determine_engine(path)
# if engine is None:
# print(f'Open RegionDS with netcdf4 engine.')
# else:
# print(f'Open RegionDS with {engine} engine.')
try:
if (isinstance(path, str) and "*" not in path) or isinstance(
path, pathlib.PosixPath
):
ds = xr.open_dataset(path, engine=engine)
else:
with dask.config.set(
**{"array.slicing.split_large_chunks": split_large_chunks}
):
if isinstance(path, str):
import glob
path = sorted([p for p in glob.glob(path)])
ds = xr.open_mfdataset(
path,
parallel=False,
combine="nested",
concat_dim=region_dim,
engine=engine,
)
except Exception as e:
print(f"Got error when opening {path}")
print(f"Engine parameter is {engine}")
raise e
ds = cls(
ds,
region_dim=region_dim,
location=location,
chrom_size_path=chrom_size_path,
).squeeze()
return ds
def iter_index(self, chunk_size=100000, dim=None):
if dim is None:
dim = self.region_dim
index = self.get_index(dim)
for chunk_start in range(0, index.size, chunk_size):
use_index = index[chunk_start: chunk_start + chunk_size]
yield use_index
def iter_array(self, chunk_size=100000, dim=None, da=None, load=False):
if dim is None:
dim = self.region_dim
if da is None:
da = f"{dim}_da"
_da = self[da]
assert dim in _da.dims
for _index in self.iter_index(chunk_size=chunk_size, dim=dim):
use_da = _da.sel({dim: _index})
if load:
use_da.load()
yield use_da
def get_fasta(
self,
genome_fasta,
output_path,
slop=None,
chrom_size_path=None,
standardize_length=None,
):
bed = self.get_bed(
with_id=True,
bedtools=True,
slop=slop,
chrom_size_path=chrom_size_path,
standardize_length=standardize_length,
)
bed.getfasta(fo=output_path, nameOnly=True, fi=genome_fasta)
return
def get_bed(
self,
with_id=True,
bedtools=False,
slop=None,
chrom_size_path=None,
standardize_length=None,
):
if chrom_size_path is None:
chrom_size_path = self.chrom_size_path # will be none if not exist
region_dim = self.region_dim
bed_df = pd.DataFrame(
{
"chrom": self.coords[f"{region_dim}_chrom"],
"start": self.coords[f"{region_dim}_start"],
"end": self.coords[f"{region_dim}_end"],
}
)
# standardize region length, used in motif enrichment analysis
if standardize_length is not None:
# standardize_length is an int number
region_center = bed_df["start"] + (bed_df["end"] - bed_df["start"]) // 2
bed_df["start"] = region_center - 1
bed_df["end"] = region_center
slop = (
standardize_length // 2
) # use the bedtools slop to extend the center to standard length
if with_id:
bed_df["name"] = self.get_index(region_dim).tolist()
bed = None
if slop is not None and slop > 0:
if chrom_size_path is None:
raise ValueError("Must provide chrom_size_path when slop is not None.")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bed = BedTool.from_dataframe(bed_df).slop(b=slop, g=chrom_size_path)
if not bedtools:
bed_df = bed.to_dataframe()
if bedtools:
if bed is None:
bed = BedTool.from_dataframe(bed_df)
return bed
else:
bed_df.index = self.get_index(self.region_dim)
return bed_df
def _chunk_annotation_executor(self, annotation_function, cpu, save=True, **kwargs):
chrom_size_path = kwargs["chrom_size_path"]
dim = kwargs["dim"]
chunk_size = kwargs["chunk_size"]
# popped here and added back chunk-by-chunk when submitting jobs, to control parallelism
track_paths = kwargs.pop("track_paths")
region_ds_path = self.location
if region_ds_path is None:
raise ValueError(f"Must have an on-disk location to annotate bigwigs.")
if chrom_size_path is None:
chrom_size_path = self.chrom_size_path
region_dim = self.region_dim
# deal with path
chunk_dir_path = f"{region_ds_path}/.{region_dim}_{dim}_chunks"
chunk_dir_path = pathlib.Path(chunk_dir_path)
if chunk_dir_path.exists():
subprocess.run(f"rm -rf {chunk_dir_path}", shell=True)
chunk_dir_path.mkdir(exist_ok=True)
final_dir_path = f"{region_ds_path}/{region_dim}_{dim}"
n_features = len(track_paths)
if chunk_size == "auto":
chunk_size = max(1, n_features // cpu // 2 + 1)
print(f"Use chunk size {chunk_size}")
other_kwargs = {
"region_dim": region_dim,
"dataset_path": region_ds_path,
"chrom_size_path": chrom_size_path,
}
kwargs.update(other_kwargs)
with ProcessPoolExecutor(cpu) as exe:
futures = {}
for i, chunk_start in enumerate(range(0, n_features, chunk_size)):
output_path = f"{chunk_dir_path}/chunk_{i}.zarr"
kwargs["output_path"] = output_path
kwargs["track_paths"] = track_paths[
chunk_start: chunk_start + chunk_size
]
future = exe.submit(annotation_function, **kwargs)
futures[future] = i
# time.sleep(1)
chunks_to_write = {}
for i, future in enumerate(as_completed(futures)):
chunk_i = futures[future]
output_path = future.result()
chunks_to_write[chunk_i] = output_path
if save:
write_ordered_chunks(
chunks_to_write=chunks_to_write,
final_path=final_dir_path,
append_dim=dim,
engine="zarr",
dtype=kwargs["dtype"],
coord_dtypes=None,
)
update_region_ds_config(
self.location, new_dataset_dim={f"{region_dim}_{dim}": region_dim}
)
# load the newly generated da only
_ds = xr.open_zarr(final_dir_path)
else:
_ds = xr.open_mfdataset(
[chunks_to_write[k] for k in sorted(chunks_to_write.keys())],
concat_dim=dim,
combine="nested",
engine="zarr",
).load()
self.update(_ds)
subprocess.run(f"rm -rf {chunk_dir_path}", shell=True)
return
def annotate_by_bigwigs(
self,
bigwig_table,
dim,
slop=100,
chrom_size_path=None,
value_type="mean",
chunk_size="auto",
dtype="float32",
cpu=1,
save=True,
):
if isinstance(bigwig_table, dict):
track_paths = pd.Series(bigwig_table)
elif isinstance(bigwig_table, pd.Series):
track_paths = bigwig_table
elif isinstance(bigwig_table, str) and bigwig_table.endswith("csv"):
track_paths = pd.read_csv(
bigwig_table, index_col=0, squeeze=True, header=None
)
else:
track_paths = pd.read_csv(
bigwig_table, sep="\t", index_col=0, squeeze=True, header=None
)
kwargs = dict(
track_paths=track_paths,
dim=dim,
slop=slop,
chrom_size_path=chrom_size_path,
value_type=value_type,
chunk_size=chunk_size,
dtype=dtype,
)
self._chunk_annotation_executor(
_annotate_by_bigwigs_worker, cpu=cpu, save=save, **kwargs
)
return
def annotate_by_beds(
self,
bed_table,
dim,
slop=100,
chrom_size_path=None,
chunk_size="auto",
dtype="bool",
bed_sorted=True,
cpu=1,
save=True,
):
bed_tmp = pathlib.Path(
f"./pybedtools_tmp_{np.random.randint(0, 100000)}"
).absolute()
bed_tmp.mkdir(exist_ok=True)
default_tmp = pybedtools.helpers.get_tempdir()
pybedtools.helpers.set_tempdir(str(bed_tmp))
if isinstance(bed_table, dict):
track_paths = pd.Series(bed_table)
elif isinstance(bed_table, pd.Series):
track_paths = bed_table
elif isinstance(bed_table, str) and bed_table.endswith("csv"):
track_paths =
|
pd.read_csv(bed_table, index_col=0, squeeze=True, header=None)
|
pandas.read_csv
|
from __future__ import print_function
import argparse
import json
import logging
import os
import pandas as pd
import numpy as np
import pickle as pkl
from sagemaker_containers import entry_point
from sagemaker_xgboost_container.data_utils import get_dmatrix
import xgboost as xgb
from xgboost.sklearn import XGBClassifier # how do we make sure we are using this
from sklearn import metrics  # additional scikit-learn functions
def CreateBalancedSampleWeights(y_train, largest_class_weight_coef):
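# Worked example of the weighting below (illustration only): for y_train with class
# counts {0: 90, 1: 10}, total_samples=100 and n_classes=2, so weights =
# 100 / (2 * [90, 10]) = [0.56, 5.0]; the weight of label classes[1] is then scaled
# by largest_class_weight_coef.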
classes = y_train.unique()
classes.sort()
class_samples = np.bincount(y_train)
total_samples = class_samples.sum()
n_classes = len(class_samples)
weights = total_samples / (n_classes * class_samples * 1.0)
class_weight_dict = {key : value for (key, value) in zip(classes, weights)}
class_weight_dict[classes[1]] = class_weight_dict[classes[1]] * largest_class_weight_coef
sample_weights = [class_weight_dict[y] for y in y_train]
return sample_weights
def input_fn(request_body, request_content_type):
"""An input_fn that loads a numpy array"""
if request_content_type == "text/csv":
input_features =[]
for i in request_body.split('\n'): # the first element is the id, the rest is payload
if len(i) == 0: continue
input_features.append([float(j) for j in i.split(",")])
return np.array(input_features)
else:
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Hyperparameters are described here
parser.add_argument('--n_estimators', type=int, default=1000)
parser.add_argument('--n_jobs', type=int, default=4)
parser.add_argument('--max_depth', type=int, default=5)
parser.add_argument('--learning_rate', type=float, default=0.01)
parser.add_argument('--objective', type=str, default='multi:softmax')
parser.add_argument('--subsample', type=float, default=1)
parser.add_argument('--reg_lambda', type=float, default=0.1)
parser.add_argument('--eval_metric', type=str, default='merror') #- looks like we don't include this in fact, worth checking later
parser.add_argument('--colsample_bytree', type=float, default=1)
parser.add_argument('--gamma', type=float, default=1)
# SageMaker specific arguments. Defaults are set in the environment variables.
parser.add_argument('--output_data_dir', type=str, default=os.environ.get('SM_OUTPUT_DATA_DIR'))
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--validation', type=str, default=os.environ['SM_CHANNEL_VALIDATION'])
args = parser.parse_args()
# Take the set of files and read them all into a single pandas dataframe
input_files1 = [ os.path.join(args.train, file1) for file1 in os.listdir(args.train) ]
if len(input_files1) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the channel was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(args.train))
raw_data1 = [
|
pd.read_csv(file1, header=None, engine='python')
|
pandas.read_csv
|
import pandas as pd
from psycopg2 import extras
import os
import json
import math
from google.cloud import storage
from payouts_config import BaseConfig
import psycopg2
from calculate_email import send_mail
from mail_to_foundation_acc import mail_to_foundation_accounts
from datetime import datetime, timezone, timedelta
from logger_util import logger
from itertools import groupby
import warnings
import sys
warnings.filterwarnings('ignore')
connection_archive = psycopg2.connect(
host=BaseConfig.POSTGRES_ARCHIVE_HOST,
port=BaseConfig.POSTGRES_ARCHIVE_PORT,
database=BaseConfig.POSTGRES_ARCHIVE_DB,
user=BaseConfig.POSTGRES_ARCHIVE_USER,
password=BaseConfig.POSTGRES_ARCHIVE_PASSWORD
)
connection_payout = psycopg2.connect(
host=BaseConfig.POSTGRES_PAYOUT_HOST,
port=BaseConfig.POSTGRES_PAYOUT_PORT,
database=BaseConfig.POSTGRES_PAYOUT_DB,
user=BaseConfig.POSTGRES_PAYOUT_USER,
password=BaseConfig.POSTGRES_PAYOUT_PASSWORD
)
ERROR = 'Error: {0}'
def get_gcs_client():
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = BaseConfig.CREDENTIAL_PATH
return storage.Client()
def read_staking_json_list():
storage_client = get_gcs_client()
bucket = storage_client.get_bucket(BaseConfig.GCS_BUCKET_NAME)
staking_file_prefix = "staking-"
blobs = storage_client.list_blobs(bucket, start_offset=staking_file_prefix)
# convert to string
file_dict_for_memory = dict()
for blob in blobs:
file_dict_for_memory[blob.name] = blob.updated
sorted_list = [k for k, v in sorted(file_dict_for_memory.items(), key=lambda p: p[1], reverse=False)]
recent_file = [list(i) for j, i in groupby(sorted_list, lambda a: a.split('-')[1])]
recent_file = [recent[-1] for recent in recent_file]
file_name_list_for_memory = [file for file in recent_file if str(file).endswith(".json")]
return file_name_list_for_memory
def get_last_processed_epoch_from_audit():
audit_query = '''select epoch_id from payout_audit_log where job_type='calculation'
order by id desc limit 1'''
last_epoch = 0
try:
cursor = connection_payout.cursor()
cursor.execute(audit_query)
if cursor.rowcount > 0:
data_count = cursor.fetchall()
last_epoch = int(data_count[-1][-1])
except (Exception, psycopg2.DatabaseError) as error:
logger.error(ERROR.format(error))
cursor.close()
return -1
finally:
cursor.close()
return last_epoch
# determine whether process can run now for given epoch number
def can_run_job(next_epoch):
next_epoch_end = (int(next_epoch+1) * 7140 * 3) + (100 * 3)
next_job_time = BaseConfig.GENESIS_DATE + timedelta(minutes=next_epoch_end)
next_job_time = next_job_time.replace(tzinfo=timezone.utc)
next_job_time = next_job_time + timedelta(days=1)
next_job_time= next_job_time.replace(hour=00, minute=30)
current_time = datetime.now(timezone.utc)
if next_job_time > current_time:
result = False
else:
result = True
return result
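# Note on the arithmetic in can_run_job above (assuming Mina-style epochs of 7140 slots at
# 3 minutes per slot): next_epoch_end is expressed in minutes after genesis, e.g. for
# next_epoch=1 it is 2 * 7140 * 3 + 100 * 3 = 43140 minutes (roughly 30 days), and the job
# is only allowed to start from 00:30 UTC on the day after that point.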
# this will check audit log table, and will determine last processed epoch
# if no entries found, default to first epoch
def initialize():
result = 0
last_epoch = get_last_processed_epoch_from_audit()
if can_run_job(last_epoch+1) :
result = main(last_epoch + 1, True)
else:
result = last_epoch
return result
# for epoch 0 & epoch 1
# - have to use same staking ledger 'staking-1'
# - blocks produced would be for epoch 0 & epoch 1
# - payment recieved would be for epoch 0 & epoch 1
def is_genesis_epoch(epoch_id):
return int(epoch_id) < 2
def read_staking_json_for_epoch(epoch_id):
storage_client = get_gcs_client()
bucket = storage_client.get_bucket(BaseConfig.GCS_BUCKET_NAME)
if is_genesis_epoch(epoch_id):
staking_file_prefix = "staking-1-"
else:
staking_file_prefix = "staking-" + str(epoch_id)
blobs = storage_client.list_blobs(bucket, prefix=staking_file_prefix)
# convert to string
ledger_name = ''
modified_staking_df = pd.DataFrame()
file_to_read = read_staking_json_list()
for blob in blobs:
if blob.name in file_to_read:
logger.info(blob.name)
ledger_name = blob.name
json_data_string = blob.download_as_string()
json_data_dict = json.loads(json_data_string)
staking_df =
|
pd.DataFrame(json_data_dict)
|
pandas.DataFrame
|
"""
A strategy should always receive the tape to be modified and return the last line, so it can be fed back into the tape in the main market_maker class.
"""
from market_maker.utils import log
import pandas as pd
from market_maker.settings import settings
from time import sleep
import matplotlib.pyplot as plt
logger = log.setup_custom_logger(__name__)
pd.options.display.width = 300 # for pandas
class Strategy:
def __init__(self, exchange):
self.exchange = exchange
def check_contingent_limit(self):
"""
Check API limits. If a limit is reached no trades should be submitted.
@return: True if the limit is reached, false otherwise
"""
#max_contingents = 10
max_contingents = 2
# max_stops = 10
# check for API limits
open_position = self.exchange.get_position()['simpleCost']
# open_orders = self.exchange.get_num_open_orders()
contingent_orders = self.exchange.get_num_contingent_orders()
# check the contingent orders limit to avoid being kicked out from the API
# if (contingent_orders >= max_contingents) and (open_position != 0): # DEBUG: prevents any additional orders being added when there is an open position
if (contingent_orders >= max_contingents):
logger.info("Max contingent orders reached: {} ... System sleeping for {}s".format(contingent_orders, settings.LOOP_INTERVAL))
return True
#elif open_position != 0:
# logger.info("Open position detected of {} contracts. ... System sleeping for {}s".format(open_position, settings.LOOP_INTERVAL))
# return 'position'
elif (contingent_orders < max_contingents) and open_position == 0:
return False
def check_open_position(self):
position = self.exchange.get_position()
if position['isOpen']:
open_position = position['simpleCost']
return open_position
else:
return False
def run_strategy(self):
# Get latest quote (raw df line)
"""
INPUT : latest quote contains :
askPrice askSize bidPrice bidSize symbol walletBalance marginBalance fundingRate fundingTimestamp indicativeFundingRate
timestamp
2018-04-02 03:12:00+00:00 6938.0 4258 6937.5 70688 XBTUSD 3.13026403 3.13026403 -0.001697 2018-04-02 04:00:00+00:00 0.000967
--------------------------------------------------------------------------------------------------------------
"""
# set defaults
trade_side = False
executed = False
latest_quote = self.exchange.get_latest_quote_with_funding()
current_time = latest_quote.index.tolist()[0] # extrapolate latest time
logger.info('....executing run_strategy()')
# check limites
if not self.check_contingent_limit():
open_position = self.check_open_position()
latest_quote_time =
|
pd.to_datetime(latest_quote.index)
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
import pytest
from model.calculatemoments import CalculateMoments
@pytest.fixture
def setup_mean_matrix1():
x1 = [1 for i in range(1000)]
x2 = [2 for i in range(1000)]
x3 = [3 for i in range(1000)]
return
|
pd.DataFrame({'x1': x1, 'x2': x2, 'x3': x3})
|
pandas.DataFrame
|
#!/usr/bin/env python3
import argparse
import copy
import hashlib
import itertools
import math
import multiprocessing
import numpy as np
import os
import pandas as pd
import pickle
import random
import time
from multiprocessing import Pool
import subprocess
from collections import defaultdict
from collections import Counter
f_fields = ['hour', 'banner_pos', 'device_id', 'device_ip', 'device_model', 'device_conn_type', 'C14', 'C17', 'C20', 'C21', 'pub_id', 'pub_domain', 'pub_category', 'device_id_count', 'device_ip_count', 'user_count', 'smooth_user_hour_count', 'user_click_histroy']
def parse_args():
parser = argparse.ArgumentParser('Calculate group features and dump them to a specified file')
parser.add_argument('train', type=str, help='csv file')
parser.add_argument('valid', type=str, help='csv file')
parser.add_argument('partition', type=str, help='site/app')
parser.add_argument('g_field', type=str, help='specify the fields used to group instances')
parser.add_argument('a_field', type=str, help='specify the fields considered in each group')
parser.add_argument('--gc_begin', type=int, default=16, help='the index of the first column in group features')
parser.add_argument('--max_occur', type=int, default=100, help='specify the maximum number of occurrences for count features. Any feature occurring fewer times than this value is replaced with its count.')
parser.add_argument('--max_sz_group', type=int, default=100, help='the upper limit of the size of each group')
parser.add_argument('--max_nr_group_feats', type=int, default=2500, help='the maximum number of features among a group')
return vars(parser.parse_args())
def hashstr(str, nr_bins=1e6):
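# Descriptive note: a plain hashing trick; the md5 digest of the string is reduced modulo
# (nr_bins - 1) and shifted by one, giving a bucket in [1, nr_bins - 1]. Because nr_bins
# is a float literal the result is a float, which is why callers wrap it in int().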
return int(hashlib.md5(str.encode('utf8')).hexdigest(), 16)%(nr_bins-1)+1
def vtform(v, partition, c, cnts, max_occur):
pub_in_raw = {'pub_id': {'app': 'app_id', 'site': 'site_id'}, 'pub_domain': {'app': 'app_domain', 'site': 'site_domain'}, 'pub_category': {'app': 'app_category', 'site': 'site_category'}}
if c in pub_in_raw:
c = pub_in_raw[c][partition]
if c != 'hour':
if v in cnts[c]:
if cnts[c][v] >= max_occur:
return c+'-'+v
else:
return c+'-less-'+str(cnts[c][v])
else:
return c+'-less'
else:
return c+'-'+v[-2:]
def generate_feats(df, partition, a_field, gc_begin, max_occur, max_sz_group, max_nr_group_feats, tr_path, va_path):
g_added = set(a_field.split(',')) & set(f_fields)
col_fm_indices = {c:i+gc_begin for i, c in enumerate(g_added)}
with open('fc.trva.r0.t2.pkl', 'rb') as fh:
cnts = pickle.load(fh)
with open(tr_path, 'wt') as f_tr, open(va_path, 'wt') as f_va:
for gid, group in df.groupby('__kid__'):
group_feats = dict()
if len(group) < max_sz_group:
for c in g_added:
group_feats[c] = Counter(group[c].apply(lambda x: vtform(x, partition, c, cnts, max_occur)))
c_norm = 1/math.sqrt(sum([w**2 for w in group_feats[c].values()]))/len(g_added)
for v, w in group_feats[c].items():
group_feats[c][v] = w*c_norm
gf_str = ''
for c, vws in group_feats.items():
for v, w in vws.items():
gf_str += ' {0}:{1}:{2:.5f}'.format(col_fm_indices[c], int(hashstr('group-'+v)), w)
for rid, row in group.iterrows():
feats_str = row['id'] + gf_str
if row['__src__'] == '__tr__':
f_tr.write(feats_str+'\n')
elif row['__src__'] == '__va__':
f_va.write(feats_str+'\n')
def cat(combined, names):
if os.path.exists(combined):
os.remove(combined)
for name in names:
cmd = 'cat {0} >> {1}'.format(name, combined)
p = subprocess.Popen(cmd, shell=True)
p.communicate()
def delete(names):
for name in names:
cmd = 'rm {0}'.format(name)
p = subprocess.Popen(cmd, shell=True)
p.communicate()
def get_pid_table(df, col, sz_chunk):
return df.groupby(col)['id'].count().cumsum().apply(lambda x: int(x/sz_chunk))
if __name__ == '__main__':
args = parse_args()
spec = '.T_{max_occur}.gins_{max_sz_group}.gfeat_{max_nr_group_feats}.gby_{g_field}.add_{a_field}'.format(
max_occur=args['max_occur'], max_sz_group=args['max_sz_group'], max_nr_group_feats=args['max_nr_group_feats'],
g_field=args['g_field'], a_field=args['a_field'])
# loading
start = time.time()
tr = pd.read_csv(args['train'], dtype=str)
tr['__src__'] = '__tr__'
va = pd.read_csv(args['valid'], dtype=str)
va['__src__'] = '__va__'
trva =
|
pd.concat([tr, va])
|
pandas.concat
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
|
tm.assert_index_equal(result, expected)
|
pandas.util.testing.assert_index_equal
|
from PIL import Image
import os
from glob import glob
import torch
import pandas as pd
import argparse
import time
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from util import create_params, flip
from model import create_model
from collections import OrderedDict
from tqdm import tqdm
normalize = transforms.Normalize(mean=[0.5754, 0.4529, 0.3986],
std=[0.2715, 0.2423, 0.2354])
class FacialDataset_test(Dataset):
def __init__(self, data_path, img_size):
if not os.path.exists(data_path):
raise Exception(f"[!] {self.data_path} not existed")
self.imgs = []
self.transform = transforms.Compose([
transforms.Resize((img_size,img_size)),
transforms.ToTensor(),
normalize
])
self.age_path = sorted(glob(os.path.join(data_path, "*.*")))
for pth in self.age_path:
img = pth
self.imgs.append(img)
def __getitem__(self, idx):
image = self.transform(Image.open(self.imgs[idx]))
return image
def __len__(self):
return len(self.age_path)
def eval():
config = create_params()
test_dataset = FacialDataset_test(config.data_dir+'/test', config.img_size)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,batch_size=1,shuffle=False)
if not os.path.exists(os.path.join(config.save_dir, config.arch)):
os.makedirs(os.path.join(config.save_dir, config.arch, 'best'))
os.makedirs(os.path.join(config.save_dir, config.arch, 'latest'))
print("Created directory ",str(os.path.join(config.save_dir, config.arch)))
# load checkpoint
checkpoint = None
centroid = None
if config.eval:
checkpoint = torch.load(os.path.join(config.output_dir, config.arch, 'best','model_{}.pt'.format(config.trial)))
if 'centroid' in checkpoint.keys():
centroid = checkpoint['centroid']
model = create_model(config)
if torch.cuda.device_count() > 1:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
else:
model.load_state_dict(checkpoint['state_dict'])
model.eval()
if config.use_gpu and torch.cuda.is_available():
model = model.cuda()
print('Make an evaluation csv file(best) for submission...')
Category = []
for input in tqdm(test_loader):
input = input.cuda()
output = model(input)
if config.arch == 'random_bin':
est = (output * centroid.view(1,-1)).view(-1, config.M, config.N)
y_hat = est.sum(dim=2)
y_bar = y_hat.mean(dim=1)
output = [y_bar.item()]
elif config.arch == 'dldlv2':
flipped = flip(input).cuda()
output_flipped = model(flipped)
output = [torch.sum(output*centroid, dim=1).item()/2 + torch.sum(output_flipped*centroid, dim=1).item()/2]
else:
output = [output.item()]
Category = Category + output
Id = list(range(0, len(Category)))
samples = {
'Id': Id,
'Category': Category
}
df =
|
pd.DataFrame(samples, columns=['Id', 'Category'])
|
pandas.DataFrame
|
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a business day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + BDay(-8)
# transition does not contain 'front' column
cols = pd.MultiIndex.from_product([[0], ['not_front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition does not sum to one across rows
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition is not monotonic increasing in back
transition = pd.DataFrame([[0.7, 0.3], [0.8, 0.2], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)


def test_no_roll_date_two_generics_static_transition(dates):
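    # well before the roll window each generic should map entirely to its own
    # front contract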
dt = dates.iloc[0]
contract_dates = dates
ts = dt + BDay(-8)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
assert wts == wts_exp


def test_not_in_roll_two_generics_static_roller(dates):
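    # roller should evaluate static_transition for each timestamp and stack
    # the per-date weights into a single (date, contract) indexed DataFrame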
dt = dates.iloc[0]
contract_dates = dates.iloc[0:3]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition,
transition=transition)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1.0, 0.0], [0.0, 1.0],
[1.0, 0.0], [0.0, 1.0]], index=midx,
columns=cols)
assert_frame_equal(wts, wts_exp)


def test_during_roll_two_generics_one_day_static_transition(dates):
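    # one business day before the roll date the middle transition row applies,
    # splitting each generic 50/50 between its front and back contracts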
contract_dates = dates
ts = dates.iloc[0] + BDay(-1)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 0.5, ts), (0, 'CLZ16', 0.5, ts),
(1, 'CLZ16', 0.5, ts), (1, 'CLF17', 0.5, ts)]
assert wts == wts_exp


def test_invalid_contract_dates():
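    # duplicate instrument labels, duplicate or non-monotonic contract dates,
    # and too few contracts should all be rejected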
ts = [pd.Timestamp("2016-10-19")]
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
non_unique_index = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-11-21')],
index=['instr1', 'instr1'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_index, mappings.static_transition,
transition=trans)
non_unique_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-20')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_vals, mappings.static_transition,
transition=trans)
non_monotonic_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-19')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.static_transition(ts[0], non_monotonic_vals, transition=trans)
not_enough_vals = pd.Series([pd.Timestamp('2016-10-19')],
index=['instr1'])
with pytest.raises(IndexError):
mappings.static_transition(ts[0], not_enough_vals, transition=trans)


def test_during_roll_two_generics_one_day_static_roller(dates):
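    # same 50/50 roll day as above, but driven through roller; the resulting
    # weights are indexed over all three contracts (CLF17, CLX16, CLZ16)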
dt = dates.iloc[0]
contract_dates = dates
timestamps = pd.DatetimeIndex([dt + BDay(-1)])
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_product([timestamps,
['CLF17', 'CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[0, 0.5], [0.5, 0], [0.5, 0.5]],
index=midx, columns=cols)
assert_frame_equal(wts, wts_exp)


#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import timeit
__author__ = ['<NAME>']
__email__ = ['<EMAIL>']
__package__ = 'Gemm testing'
NUM_REPEATS = 10
NUMBER = 500


def gemm_nn (N, M, K):
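  # time the plain matrix product c = a @ b via an einsum contraction, with
  # both operands kept in their natural row-major (NN) layout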
SETUP_CODE = '''
import numpy as np
np.random.seed(123)
N, M, K = ({N}, {M}, {K})
a = np.random.uniform(low=0., high=1., size=(N, M))
b = np.random.uniform(low=0., high=1., size=(M, K))
'''.format(**{'N' : N,
'M' : M,
'K' : K
})
TEST_CODE = '''
c = np.einsum('ij, jk -> ik', a, b, optimize=True)
'''
times = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=NUM_REPEATS,
number=NUMBER)
return times


def gemm_nt (N, M, K):
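  # time the same product using a pre-transposed copy of b (NT layout),
  # contracting over the last axis of both operands ('ij, kj -> ik')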
SETUP_CODE = '''
import numpy as np
np.random.seed(123)
N, M, K = ({N}, {M}, {K})
a = np.random.uniform(low=0., high=1., size=(N, M))
b = np.random.uniform(low=0., high=1., size=(M, K))
bt = b.T
'''.format(**{'N' : N,
'M' : M,
'K' : K
})
TEST_CODE = '''
c = np.einsum('ij, kj -> ik', a, bt, optimize=True)
'''
times = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=NUM_REPEATS,
number=NUMBER)
return times


if __name__ == '__main__':
import seaborn as sns
import pylab as plt
import pandas as pd
import numpy as np
N, M, K = (100, 200, 300)
times_nn = gemm_nn(N, M, K)
times_nt = gemm_nt(N, M, K)
ref = np.asarray(times_nn)
val = np.asarray(times_nt)
  # express both sets of timings relative to the GEMM_NN reference
  times_nt = val / ref
  times_nn = ref / ref
times_nn = pd.DataFrame(data=times_nn, columns=['Times'])
times_nn['Gemm'] = 'GEMM_NN'
times_nt = pd.DataFrame(data=times_nt, columns=['Times'])
times_nt['Gemm'] = 'GEMM_NT'
  data = pd.concat((times_nn, times_nt), axis=0)
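  # NOTE: the source ends with the concatenation above; the lines below are an
  # assumed completion (not part of the original) sketching how the seaborn
  # and pylab imports could be used to compare the relative timings. The
  # column names 'Gemm' and 'Times' come from the frames built above.
  sns.boxplot(x='Gemm', y='Times', data=data)
  plt.ylabel('Time relative to GEMM_NN')
  plt.show()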