prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
---|---|---|
import os
import sys
import math
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
from src.data.config import SITE, FOUNTAIN, FOLDERS
from multiprocessing import Pool
from src.models.air import Icestupa
class Discharge_Icestupa(Icestupa):
# def __init__(self):
# self.df = pd.read_hdf(FOLDERS["input_folder"] + "model_input_extended.h5", "df")
def run(self, experiment):
self.df = pd.read_hdf(FOLDERS["input_folder"] + "model_input_extended.h5", "df")
key = experiment.get("dia_f")
self.dia_f = key
self.melt_freeze()
Max_IceV = self.df["iceV"].max()
Efficiency = (
(self.df["meltwater"].iloc[-1] + self.df["ice"].iloc[-1])
/ (self.df["input"].iloc[-1])
* 100
)
Duration = self.df.index[-1] * 5 / (60 * 24)
h_r = self.df.h_ice.max() / self.df.r_ice.max()
water_stored = self.df["meltwater"].iloc[-1] + self.df["ice"].iloc[-1]
water_lost = self.df["vapour"].iloc[-1]
unfrozen_water = self.df["unfrozen_water"].iloc[-1]
avg_freeze_rate = self.df[self.df["Discharge"] > 0]["solid"].mean() / 5
print("\nDia", key)
print("Ice Volume Max", float(self.df["iceV"].max()))
print("Fountain efficiency", Efficiency)
print("Ice Mass Remaining", self.df["ice"].iloc[-1])
print("Meltwater", self.df["meltwater"].iloc[-1])
print("Ppt", self.df["ppt"].sum())
print("Deposition", self.df["dpt"].sum())
print("Duration", Duration)
result = pd.Series(
[
experiment.get("dia_f"),
Max_IceV,
Efficiency,
Duration,
h_r,
water_stored,
water_lost,
unfrozen_water,
avg_freeze_rate,
]
)
self.df = self.df.set_index("When").resample("H").mean().reset_index()
return (
key,
self.df["When"].values,
self.df["SA"].values,
self.df["iceV"].values,
self.df["solid"].values,
self.df["Discharge"].values,
self.df["input"].values,
self.df["meltwater"].values,
result,
)
if __name__ == "__main__":
param_values = np.arange(0.002, 0.015, 0.001).tolist()
print(param_values)
experiments = pd.DataFrame(param_values, columns=["dia_f"])
model = Discharge_Icestupa()
variables = ["When", "SA", "iceV", "solid", "Discharge", "input", "meltwater"]
df_out =
|
pd.DataFrame()
|
pandas.DataFrame
|
from os.path import abspath, dirname, join, isfile, normpath, relpath
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from datetime import datetime
import mhkit.wave as wave
from io import StringIO
import pandas as pd
import numpy as np
import contextlib
import unittest
import netCDF4
import inspect
import pickle
import json
import sys
import os
import time
from random import seed, randint
testdir = dirname(abspath(__file__))
datadir = normpath(join(testdir,relpath('../../examples/data/wave')))
class TestResourceSpectrum(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
df = self.f[1] - self.f[0]
Trep = 1/df
self.t = np.arange(0, Trep, 0.05)
@classmethod
def tearDownClass(self):
pass
def test_pierson_moskowitz_spectrum(self):
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
Tp0 = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(self.Tp - Tp0)/self.Tp
self.assertLess(error, 0.01)
def test_bretschneider_spectrum(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Hs - Hm0)/self.Hs
errorTp0 = np.abs(self.Tp - Tp0)/self.Tp
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_surface_elevation_seed(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
eta0 = wave.resource.surface_elevation(S, self.t)
eta1 = wave.resource.surface_elevation(S, self.t, seed=seednum)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phasing(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
eta0 = wave.resource.surface_elevation(S, self.t)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
np.random.seed(seednum)
phases = np.random.rand(len(S)) * 2 * np.pi
eta1 = wave.resource.surface_elevation(S, self.t, phases=phases)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phases_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
phases_np = np.random.rand(S.shape[0], S.shape[1]) * 2 * np.pi
phases_pd = pd.DataFrame(phases_np, index=S.index, columns=S.columns)
eta_np = wave.resource.surface_elevation(S, self.t, phases=phases_np)
eta_pd = wave.resource.surface_elevation(S, self.t, phases=phases_pd)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_frequency_bins_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
eta0 = wave.resource.surface_elevation(S, self.t)
f_bins_np = np.array([np.diff(S.index)[0]]*len(S))
f_bins_pd =
|
pd.DataFrame(f_bins_np, index=S.index, columns=['df'])
|
pandas.DataFrame
|
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# MAGIC
# MAGIC This is a 2019-2021 augmentation and update of [<NAME>](https://www.linkedin.com/in/adbreind)'s initial notebooks.
# MAGIC
# MAGIC _Thanks to [<NAME>](https://www.linkedin.com/in/christianvonkoch/) and [<NAME>](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._
# COMMAND ----------
# MAGIC %md
# MAGIC ## Keras Deep Feed-Forward Network
# MAGIC ### (solution)
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import pandas as pd
input_file = "/dbfs/databricks-datasets/Rdatasets/data-001/csv/ggplot2/diamonds.csv"
df = pd.read_csv(input_file, header = 0)
df.drop(df.columns[0], axis=1, inplace=True)
df =
|
pd.get_dummies(df, prefix=['cut_', 'color_', 'clarity_'])
|
pandas.get_dummies
|
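The completion in the row above one-hot-encodes the categorical columns of the diamonds frame. A minimal sketch of what that pandas.get_dummies call produces, using a made-up two-row frame with the same categorical columns (an illustration only; the toy values are assumptions, not notebook data):
import pandas as pd
toy = pd.DataFrame({
    "carat": [0.23, 0.31],
    "cut": ["Ideal", "Premium"],
    "color": ["E", "F"],
    "clarity": ["SI2", "VS1"],
})
# One indicator column per category value; the prefix list is applied, in order,
# to the object-dtype columns being encoded (cut, color, clarity).
encoded = pd.get_dummies(toy, prefix=["cut_", "color_", "clarity_"])
print(encoded.columns.tolist())
# ['carat', 'cut__Ideal', 'cut__Premium', 'color__E', 'color__F',
#  'clarity__SI2', 'clarity__VS1']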
import pandas as pd
import numpy as np
from scipy.stats import skew
df_test =
|
pd.read_csv("../../test.csv")
|
pandas.read_csv
|
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
def _test_ohlc_stop_exits(**kwargs):
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.rate(group_by=group_by),
pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate')
)
def test_total_partitions(self):
assert mask['a'].vbt.signals.total_partitions() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(),
pd.Series([2, 2, 1], index=mask.columns, name='total_partitions')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total_partitions')
)
def test_partition_rate(self):
assert mask['a'].vbt.signals.partition_rate() == 1.0
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(),
pd.Series([1.0, 1.0, 1.0], index=mask.columns, name='partition_rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(group_by=group_by),
pd.Series([1.0, 1.0], index=['g1', 'g2'], name='partition_rate')
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'First Index',
'Last Index', 'Norm Avg Index [-1, 1]', 'Distance: Min',
'Distance: Max', 'Distance: Mean', 'Distance: Std', 'Total Partitions',
'Partition Rate [%]', 'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object')
pd.testing.assert_series_equal(
mask.vbt.signals.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
1.6666666666666667,
33.333333333333336,
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
1.6666666666666667,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'), 5, 2, 40.0,
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-04 00:00:00'), -0.25, 3.0,
3.0, 3.0, np.nan, 2, 100.0, 1.0, 1.0, 1.0, 0.0, 3.0, 3.0, 3.0, np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(other=mask['b'], from_other=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
0,
0.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'Total Overlapping',
'Overlapping Rate [%]', 'First Index', 'Last Index',
'Norm Avg Index [-1, 1]', 'Distance <- Other: Min',
'Distance <- Other: Max', 'Distance <- Other: Mean',
'Distance <- Other: Std', 'Total Partitions', 'Partition Rate [%]',
'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object'),
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
4,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
4,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
|
pd.Timedelta('3 days 00:00:00')
|
pandas.Timedelta
|
"""
.. module:: volatility
:synopsis: Volatility Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin
class AverageTrueRange(IndicatorMixin):
"""Average True Range (ATR)
The indicator provides an indication of the degree of price volatility.
Strong moves, in either direction, are often accompanied by large ranges,
or large True Ranges.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_true_range_atr
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
window(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(
self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
window: int = 14,
fillna: bool = False,
):
self._high = high
self._low = low
self._close = close
self._window = min(window, len(self._close))
self._fillna = fillna
self._run()
def _run(self):
close_shift = self._close.shift(1)
true_range = self._true_range(self._high, self._low, close_shift)
atr = np.zeros(len(self._close))
#print(len(atr), ' window: ', self._window, len(true_range))
atr[self._window - 1] = true_range[0 : self._window].mean()
for i in range(self._window, len(atr)):
atr[i] = (atr[i - 1] * (self._window - 1) + true_range.iloc[i]) / float(
self._window
)
self._atr = pd.Series(data=atr, index=true_range.index)
def average_true_range(self) -> pd.Series:
"""Average True Range (ATR)
Returns:
pandas.Series: New feature generated.
"""
atr = self._check_fillna(self._atr, value=0)
return pd.Series(atr, name="atr")
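# Hedged usage sketch: exercises the AverageTrueRange class above on a tiny
# hand-made OHLC sample. Assumes the `ta` package is installed so that the
# IndicatorMixin helpers used in _run() (_true_range, _check_fillna) resolve;
# the toy values below are assumptions, not part of the original module.
def _demo_average_true_range() -> pd.Series:
    """Compute ATR for five made-up bars and return the resulting series."""
    high = pd.Series([11.0, 12.0, 13.0, 12.0, 11.0])
    low = pd.Series([9.0, 10.0, 11.0, 10.0, 9.0])
    close = pd.Series([10.0, 11.0, 12.0, 11.0, 10.0])
    # __init__ clips the window to len(close), so a short window keeps the
    # Wilder-smoothing loop well defined on five bars.
    ind = AverageTrueRange(high=high, low=low, close=close, window=3)
    return ind.average_true_range()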
class BollingerBands(IndicatorMixin):
"""Bollinger Bands
https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_bands
Args:
close(pandas.Series): dataset 'Close' column.
window(int): n period.
window_dev(int): n factor standard deviation
fillna(bool): if True, fill nan values.
"""
def __init__(
self,
close: pd.Series,
window: int = 20,
window_dev: int = 2,
fillna: bool = False,
):
self._close = close
self._window = window
self._window_dev = window_dev
self._fillna = fillna
self._run()
def _run(self):
min_periods = 0 if self._fillna else self._window
self._mavg = self._close.rolling(self._window, min_periods=min_periods).mean()
self._mstd = self._close.rolling(self._window, min_periods=min_periods).std(
ddof=0
)
self._hband = self._mavg + self._window_dev * self._mstd
self._lband = self._mavg - self._window_dev * self._mstd
def bollinger_mavg(self) -> pd.Series:
"""Bollinger Channel Middle Band
Returns:
pandas.Series: New feature generated.
"""
mavg = self._check_fillna(self._mavg, value=-1)
return pd.Series(mavg, name="mavg")
def bollinger_hband(self) -> pd.Series:
"""Bollinger Channel High Band
Returns:
pandas.Series: New feature generated.
"""
hband = self._check_fillna(self._hband, value=-1)
return pd.Series(hband, name="hband")
def bollinger_lband(self) -> pd.Series:
"""Bollinger Channel Low Band
Returns:
pandas.Series: New feature generated.
"""
lband = self._check_fillna(self._lband, value=-1)
return pd.Series(lband, name="lband")
def bollinger_wband(self) -> pd.Series:
"""Bollinger Channel Band Width
From: https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_band_width
Returns:
pandas.Series: New feature generated.
"""
wband = ((self._hband - self._lband) / self._mavg) * 100
wband = self._check_fillna(wband, value=0)
return pd.Series(wband, name="bbiwband")
def bollinger_pband(self) -> pd.Series:
"""Bollinger Channel Percentage Band
From: https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_band_perce
Returns:
pandas.Series: New feature generated.
"""
pband = (self._close - self._lband) / (self._hband - self._lband)
pband = self._check_fillna(pband, value=0)
return pd.Series(pband, name="bbipband")
def bollinger_hband_indicator(self) -> pd.Series:
"""Bollinger Channel Indicator Crossing High Band (binary).
        It returns 1 if close is higher than bollinger_hband; otherwise it returns 0.
Returns:
pandas.Series: New feature generated.
"""
hband = pd.Series(
np.where(self._close > self._hband, 1.0, 0.0), index=self._close.index
)
hband = self._check_fillna(hband, value=0)
return pd.Series(hband, index=self._close.index, name="bbihband")
def bollinger_lband_indicator(self) -> pd.Series:
"""Bollinger Channel Indicator Crossing Low Band (binary).
        It returns 1 if close is lower than bollinger_lband; otherwise it returns 0.
Returns:
pandas.Series: New feature generated.
"""
lband = pd.Series(
np.where(self._close < self._lband, 1.0, 0.0), index=self._close.index
)
lband = self._check_fillna(lband, value=0)
return pd.Series(lband, name="bbilband")
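# Illustrative usage sketch (not part of the original module): a synthetic
# random-walk close series is enough to exercise the band accessors above.
if __name__ == "__main__":
    _demo_close = pd.Series(
        100.0 + np.cumsum(np.random.default_rng(0).normal(0.0, 1.0, 60)),
        index=pd.date_range("2021-01-01", periods=60, freq="D"),
    )
    _demo_bb = BollingerBands(close=_demo_close, window=20, window_dev=2)
    _demo_bands = pd.DataFrame(
        {
            "mavg": _demo_bb.bollinger_mavg(),
            "hband": _demo_bb.bollinger_hband(),
            "lband": _demo_bb.bollinger_lband(),
            "pband": _demo_bb.bollinger_pband(),
        }
    )
    print(_demo_bands.dropna().head())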
class KeltnerChannel(IndicatorMixin):
"""KeltnerChannel
Keltner Channels are a trend following indicator used to identify reversals with channel breakouts and
channel direction. Channels can also be used to identify overbought and oversold levels when the trend
is flat.
https://school.stockcharts.com/doku.php?id=technical_indicators:keltner_channels
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
window(int): n period.
window_atr(int): n atr period. Only valid if original_version param is False.
fillna(bool): if True, fill nan values.
        original_version(bool): if True, use the original version as the centerline (SMA of typical price);
            if False, use the EMA of close as the centerline. More info:
https://school.stockcharts.com/doku.php?id=technical_indicators:keltner_channels
"""
def __init__(
self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
window: int = 20,
window_atr: int = 10,
fillna: bool = False,
original_version: bool = True,
):
self._high = high
self._low = low
self._close = close
self._window = window
self._window_atr = window_atr
self._fillna = fillna
self._original_version = original_version
self._run()
def _run(self):
min_periods = 1 if self._fillna else self._window
if self._original_version:
self._tp = (
((self._high + self._low + self._close) / 3.0)
.rolling(self._window, min_periods=min_periods)
.mean()
)
self._tp_high = (
(((4 * self._high) - (2 * self._low) + self._close) / 3.0)
.rolling(self._window, min_periods=0)
.mean()
)
self._tp_low = (
(((-2 * self._high) + (4 * self._low) + self._close) / 3.0)
.rolling(self._window, min_periods=0)
.mean()
)
else:
self._tp = self._close.ewm(
span=self._window, min_periods=min_periods, adjust=False
).mean()
atr = AverageTrueRange(
close=self._close,
high=self._high,
low=self._low,
window=self._window_atr,
fillna=self._fillna,
).average_true_range()
self._tp_high = self._tp + (2 * atr)
self._tp_low = self._tp - (2 * atr)
def keltner_channel_mband(self) -> pd.Series:
"""Keltner Channel Middle Band
Returns:
pandas.Series: New feature generated.
"""
tp_middle = self._check_fillna(self._tp, value=-1)
return pd.Series(tp_middle, name="mavg")
def keltner_channel_hband(self) -> pd.Series:
"""Keltner Channel High Band
Returns:
pandas.Series: New feature generated.
"""
tp_high = self._check_fillna(self._tp_high, value=-1)
return pd.Series(tp_high, name="kc_hband")
def keltner_channel_lband(self) -> pd.Series:
"""Keltner Channel Low Band
Returns:
pandas.Series: New feature generated.
"""
tp_low = self._check_fillna(self._tp_low, value=-1)
return pd.Series(tp_low, name="kc_lband")
def keltner_channel_wband(self) -> pd.Series:
"""Keltner Channel Band Width
Returns:
pandas.Series: New feature generated.
"""
wband = ((self._tp_high - self._tp_low) / self._tp) * 100
wband = self._check_fillna(wband, value=0)
return
|
pd.Series(wband, name="bbiwband")
|
pandas.Series
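# Illustrative usage sketch for the KeltnerChannel class above (its last method is
# shown truncated in this record, so only the fully defined band accessors are
# called). Prices are synthetic and imports are repeated to keep the sketch
# self-contained.
if __name__ == "__main__":
    import numpy as _np
    import pandas as _pd
    _demo_idx = _pd.date_range("2021-01-01", periods=40, freq="D")
    _demo_close = _pd.Series(_np.linspace(100.0, 139.0, 40), index=_demo_idx)
    _demo_kc = KeltnerChannel(
        high=_demo_close + 2.0, low=_demo_close - 2.0, close=_demo_close, window=20
    )
    print(_demo_kc.keltner_channel_mband().tail())
    print(_demo_kc.keltner_channel_hband().tail())
    print(_demo_kc.keltner_channel_lband().tail())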
|
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
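    # 12 contiguous hourly points on 2019-01-01 plus one detached point the next
    # day; that detached point is what triggers UNEVEN FREQUENCY in daily tests.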
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
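# Illustrative sketch (not part of the test suite): the expected flags above are
# built by scaling a 0/1 indicator series by a description bitmask and OR-ing in
# the version flag. The two bit values below are hypothetical, not the real
# quality_mapping values.
def _demo_flag_composition():
    _some_mask = 1 << 5           # hypothetical description bit
    _some_version_flag = 1 << 12  # hypothetical version bit
    indicator = pd.Series([0, 1, 0, 1])
    # every point carries the version bit; flagged points also carry the mask bit
    return indicator * _some_mask | _some_version_flag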
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dni(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dni_limits_QCRad']]
obs = make_observation('dni')
data = pd.Series([10, 1000, -100, 500, 500], index=default_index)
flags = tasks.validate_dni(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 0, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dni(mocker, make_observation,
default_index):
obs = make_observation('dni')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dhi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dhi_limits_QCRad']]
obs = make_observation('dhi')
data = pd.Series([10, 1000, -100, 200, 200], index=default_index)
flags = tasks.validate_dhi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dhi(mocker, make_observation,
default_index):
obs = make_observation('dhi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_poa_global(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_poa_clearsky']]
obs = make_observation('poa_global')
data = pd.Series([10, 1000, -400, 300, 300], index=default_index)
flags = tasks.validate_poa_global(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_poa_global(mocker, make_observation,
default_index):
obs = make_observation('poa_global')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_air_temp(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_temperature_limits']]
obs = make_observation('air_temperature')
data = pd.Series([10, 1000, -400, 30, 20], index=default_index)
flags = tasks.validate_air_temperature(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_air_temperature(
mocker, make_observation, default_index):
obs = make_observation('air_temperature')
data = pd.DataFrame(
[(0, 0), (200, 0), (20, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_wind_speed(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_wind_limits']]
obs = make_observation('wind_speed')
data = pd.Series([10, 1000, -400, 3, 20], index=default_index)
flags = tasks.validate_wind_speed(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_wind_speed(
mocker, make_observation, default_index):
obs = make_observation('wind_speed')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_relative_humidity(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_rh_limits']]
obs = make_observation('relative_humidity')
data = pd.Series([10, 101, -400, 60, 20], index=default_index)
flags = tasks.validate_relative_humidity(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_relative_humidity(
mocker, make_observation, default_index):
obs = make_observation('relative_humidity')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (40, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_ac_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ac_power_limits']]
obs = make_observation('ac_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power(mocker, make_observation,
default_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dc_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dc_power_limits']]
obs = make_observation('dc_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power(mocker, make_observation,
default_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_daily_ghi(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('ghi')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[10, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ghi_daily(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_fetch_and_validate_observation_ghi_zeros(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0)] * 13,
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
base = (
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
base,
base,
base,
base,
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_dc_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('dc_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_ac_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation',
'detect_clipping']]
obs = make_observation('ac_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 100, -100, 100, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES']
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(10, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity', 'net_load',
])
def test_fetch_and_validate_observation_other(var, mocker, make_observation,
daily_index):
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
validated = pd.Series(2, index=daily_index)
validate_mock = mocker.MagicMock(return_value=validated)
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: validate_mock})
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
assert post_mock.called
assert validate_mock.called
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_daily_validation_other(
mocker, make_observation, daily_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
mocks = [mock,
mocker.spy(tasks, '_validate_stale_interpolated')]
obs = make_observation(var)
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
10, 1900, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 0}, index=daily_index)
out = tasks.apply_daily_validation(obs, data)
assert (out['quality_flag'] | DAILY_VALIDATION_FLAG).all()
for mock in mocks:
assert mock.called
@pytest.mark.parametrize('var', ['net_load'])
def test_apply_daily_validation_defaults(
mocker, make_observation, daily_index, var):
mocks = [mocker.spy(tasks, 'validate_defaults'),
mocker.spy(tasks, '_validate_stale_interpolated')]
obs = make_observation(var)
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
10, 1900, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 0}, index=daily_index)
out = tasks.apply_daily_validation(obs, data)
assert (out['quality_flag'] | DAILY_VALIDATION_FLAG).all()
for mock in mocks:
assert mock.called
def test_apply_daily_validation(mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
0, 100, -100, 100, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 94},
index=daily_index)
out = tasks.apply_daily_validation(obs, data)
qf = (pd.Series(LATEST_VERSION_FLAG, index=data.index),
pd.Series(DAILY_VALIDATION_FLAG, index=data.index),
pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES']
)
exp = data.copy()
exp['quality_flag'] = sum(qf)
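    # sum() matches a bitwise OR here because each series above contributes a
    # disjoint set of flag bits (or zero) at each timestamp, so no bits overlap.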
assert_frame_equal(exp, out)
def test_apply_daily_validation_not_enough(mocker, make_observation):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0)],
index=pd.date_range(start='2019-01-01T0000Z',
end='2019-01-01T0100Z',
tz='UTC',
freq='1h'),
columns=['value', 'quality_flag'])
with pytest.raises(IndexError):
tasks.apply_daily_validation(obs, data)
def test_fetch_and_validate_all_observations(mocker, make_observation,
daily_index):
obs = [make_observation('dhi'), make_observation('dni')]
obs += [make_observation('ghi').replace(provider='Organization 2')]
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.list_observations',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs[0].provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
validated = pd.Series(2, index=daily_index)
validate_mock = mocker.MagicMock(return_value=validated)
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{'dhi': validate_mock, 'dni': validate_mock})
tasks.fetch_and_validate_all_observations(
'', data.index[0], data.index[-1], only_missing=False)
assert post_mock.called
assert validate_mock.call_count == 2
def test_fetch_and_validate_all_observations_only_missing(
mocker, make_observation, daily_index):
obs = [make_observation('dhi'), make_observation('dni')]
obs += [make_observation('ghi').replace(provider='Organization 2')]
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.list_observations',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs[0].provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values_not_flagged', # NOQA
return_value=np.array(['2019-01-01', '2019-01-02'],
dtype='datetime64[D]'))
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_all_observations(
'', data.index[0], data.index[-1], only_missing=True)
assert post_mock.called
assert (post_mock.call_args_list[0][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[1][0][1].index.date ==
dt.date(2019, 1, 2)).all()
assert (post_mock.call_args_list[2][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[3][0][1].index.date ==
dt.date(2019, 1, 2)).all()
def test_fetch_and_validate_observation_only_missing(
mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs.provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values_not_flagged', # NOQA
return_value=np.array(['2019-01-01', '2019-01-02'],
dtype='datetime64[D]'))
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'token', 'obsid', data.index[0], data.index[-1], only_missing=True)
assert post_mock.called
assert (post_mock.call_args_list[0][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[1][0][1].index.date ==
dt.date(2019, 1, 2)).all()
def test__group_continuous_week_post(mocker, make_observation):
split_dfs = [
pd.DataFrame([(0, LATEST_VERSION_FLAG)],
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-03T00:00',
end='2020-05-03T23:59',
tz='UTC',
freq='1h')),
# new week split
pd.DataFrame([(0, LATEST_VERSION_FLAG)],
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-04T00:00',
end='2020-05-04T11:59',
tz='UTC',
freq='1h')),
# missing 12
pd.DataFrame(
[(0, LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'])] + # NOQA
[(1, LATEST_VERSION_FLAG)] * 7,
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-04T13:00',
end='2020-05-04T20:00',
tz='UTC',
freq='1h')),
# missing a week+
pd.DataFrame(
[(9, LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'])] + # NOQA
[(3, LATEST_VERSION_FLAG)] * 7,
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-13T09:00',
end='2020-05-13T16:59',
tz='UTC',
freq='1h')),
]
ov = pd.concat(split_dfs, axis=0)
obs = make_observation('ghi')
session = mocker.MagicMock()
tasks._group_continuous_week_post(session, obs, ov)
call_list = session.post_observation_values.call_args_list
assert len(call_list) == 4
for i, cal in enumerate(call_list):
assert_frame_equal(split_dfs[i], cal[0][1])
@pytest.mark.parametrize('vals,func', [
(pd.DataFrame({'value': 0, 'quality_flag': 4}, index=pd.DatetimeIndex(
[pd.Timestamp.utcnow()], name='timestamp')),
'apply_immediate_validation'),
(pd.DataFrame({'value': [0.0] * 5 + [None] * 10, 'quality_flag': 4},
index=pd.date_range('now', name='timestamp', freq='2h',
periods=15)),
'apply_immediate_validation'),
(pd.DataFrame({'value': [0.0] * 15 + [None] * 11, 'quality_flag': 4},
index=pd.date_range('now', name='timestamp', freq='1h',
periods=26)),
'apply_daily_validation'),
])
def test_apply_validation(make_observation, mocker, vals, func):
obs = make_observation('ac_power')
fmock = mocker.patch.object(tasks, func, autospec=True)
tasks.apply_validation(obs, vals)
assert fmock.called
def test_apply_validation_empty(make_observation, mocker):
obs = make_observation('dhi')
daily = mocker.patch.object(tasks, 'apply_daily_validation')
immediate = mocker.patch.object(tasks, 'apply_immediate_validation')
data = pd.DataFrame({'value': [], 'quality_flag': []},
index=pd.DatetimeIndex([], name='timestamp'))
out = tasks.apply_validation(obs, data)
assert_frame_equal(out, data)
assert not daily.called
assert not immediate.called
def test_apply_validation_bad_df(make_observation, mocker):
obs = make_observation('dhi')
data = pd.DataFrame()
with pytest.raises(TypeError):
tasks.apply_validation(obs, data)
with pytest.raises(TypeError):
tasks.apply_validation(obs, pd.Series(
index=pd.DatetimeIndex([]),
dtype=float))
def test_apply_validation_agg(aggregate, mocker):
data = pd.DataFrame({'value': [1], 'quality_flag': [0]},
index=pd.DatetimeIndex(
['2020-01-01T00:00Z'], name='timestamp'))
out = tasks.apply_validation(aggregate, data)
assert_frame_equal(data, out)
def test_find_unvalidated_time_ranges(mocker):
session = mocker.MagicMock()
session.get_observation_values_not_flagged.return_value = np.array(
['2019-04-13', '2019-04-14', '2019-04-15', '2019-04-16', '2019-04-18',
'2019-05-22', '2019-05-23'], dtype='datetime64[D]')
obs = mocker.MagicMock()
obs.observation_id = ''
obs.site.timezone = 'UTC'
out = list(tasks._find_unvalidated_time_ranges(
session, obs, '2019-01-01T00:00Z', '2020-01-01T00:00Z'))
assert out == [
(pd.Timestamp('2019-04-13T00:00Z'), pd.Timestamp('2019-04-17T00:00Z')),
(pd.Timestamp('2019-04-18T00:00Z'), pd.Timestamp('2019-04-19T00:00Z')),
(pd.Timestamp('2019-05-22T00:00Z'), pd.Timestamp('2019-05-24T00:00Z')),
]
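# Illustrative sketch (not the library's implementation): the expected ranges
# above come from grouping consecutive unvalidated days into half-open
# (start, end) pairs, where end is midnight after the last day of each run.
def _demo_group_days_into_ranges(days, tz='UTC'):
    timestamps = pd.DatetimeIndex(days).tz_localize(tz)
    one_day = pd.Timedelta('1D')
    ranges = []
    run_start = prev = timestamps[0]
    for ts in timestamps[1:]:
        if ts - prev > one_day:  # a gap closes the current run
            ranges.append((run_start, prev + one_day))
            run_start = ts
        prev = ts
    ranges.append((run_start, prev + one_day))
    return ranges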
def test_find_unvalidated_time_ranges_all(mocker):
session = mocker.MagicMock()
session.get_observation_values_not_flagged.return_value = np.array(
['2019-04-13', '2019-04-14', '2019-04-15', '2019-04-16'],
dtype='datetime64[D]')
obs = mocker.MagicMock()
obs.observation_id = ''
obs.site.timezone = 'Etc/GMT+7'
out = list(tasks._find_unvalidated_time_ranges(
session, obs, '2019-01-01T00:00Z', '2020-01-01T00:00Z'))
assert out == [
(pd.Timestamp('2019-04-13T00:00-07:00'),
|
pd.Timestamp('2019-04-17T00:00-07:00')
|
pandas.Timestamp
|
from lib.detection_strategies import *
import threading
import numpy as np
import pyautogui
from pyautogui import press, hotkey, click, scroll, typewrite, moveRel, moveTo, position
import time
from subprocess import call
import os
from lib.system_toggles import mute_sound, toggle_speechrec, toggle_eyetracker, turn_on_sound
import pandas as pd
import matplotlib.pyplot as plt
class TestMode:
def __init__(self, modeSwitcher):
self.mode = "regular"
self.modeSwitcher = modeSwitcher
def start( self ):
self.mode = "regular"
self.centerXPos, self.centerYPos = pyautogui.position()
toggle_eyetracker()
mute_sound()
self.testdata = []
self.starttime = time.time()
self.preventDoubleClickInPlotMode = time.time()
self.plot_in_seconds( 15.00 )
def handle_input( self, dataDicts ):
## Alter the data dicts into the right format for plotting
dataRow = {'time': int((time.time() - self.starttime ) * 1000) / 1000 }
for column in dataDicts[-1]:
dataRow['intensity'] = dataDicts[-1][ column ]['intensity']
dataRow[column] = dataDicts[-1][ column ]['percent']
if( dataDicts[-1][ column ]['winner'] ):
dataRow['winner'] = column
if( self.mode == "regular" ):
self.testdata.append( dataRow )
        ## In plot mode, let any loud sound trigger a click (at most once per second) so the plot can be closed
elif( dataRow['intensity'] > 2000 and ( time.time() - self.preventDoubleClickInPlotMode ) > 1 ):
click()
self.preventDoubleClickInPlotMode = time.time()
    def plot_in_seconds( self, seconds ):
        t = threading.Timer( seconds, self.display_results)
t.daemon = True
t.start()
def display_results( self ):
print( "Plotting results - Use any loud sound to click" )
time.sleep( 2 )
self.mode = "plotting"
self.preventDoubleClickInPlotMode = time.time()
plt.style.use('seaborn-darkgrid')
palette = plt.get_cmap('Set1')
num = 0
bottom=0
self.testdata =
|
pd.DataFrame(data=self.testdata)
|
pandas.DataFrame
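# Illustrative sketch (an assumption, not taken from the project): handle_input
# above expects each element of dataDicts to map a label to a dict holding
# 'intensity', 'percent' and 'winner'; the loop flattens the newest element into
# a single plot row, as mimicked here.
def _demo_flatten_latest(data_dicts, elapsed_seconds):
    row = {'time': int(elapsed_seconds * 1000) / 1000}
    for column in data_dicts[-1]:
        row['intensity'] = data_dicts[-1][column]['intensity']
        row[column] = data_dicts[-1][column]['percent']
        if data_dicts[-1][column]['winner']:
            row['winner'] = column
    return row
# e.g. _demo_flatten_latest([{'loud_s': {'intensity': 2400, 'percent': 0.8,
#                                        'winner': True}}], 1.523)
# -> {'time': 1.523, 'intensity': 2400, 'loud_s': 0.8, 'winner': 'loud_s'}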
|
from itertools import product as it_product
from typing import List, Dict, Tuple
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr, wilcoxon
from provided_code.constants_class import ModelParameters
from provided_code.data_loader import DataLoader
from provided_code.dose_evaluation_class import EvaluateDose
from provided_code.general_functions import get_paths, get_predictions_to_optimize
def consolidate_data_for_analysis(cs: ModelParameters, force_new_consolidate: bool = False) \
        -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
    Consolidates data from all reference plans, dose predictions, and KBP plans. This may take about an hour to run, but
only needs to be run once for a given set of experiments.
Args:
cs: A constants object.
force_new_consolidate: Flag that will force consolidating data, which will overwrite previous data that was
consolidated in previous iterations.
Returns:
df_dose_error: Summary of dose error
df_dvh_metrics: Summary of DVH metric performance (can be converted to DVH error later)
df_clinical_criteria: Summary of clinical criteria performance
df_ref_dvh_metrics: Summary of reference dose DVH metrics
df_ref_clinical_criteria: Summary of reference dose clinical criteria performance
df_objective_data: The data from the objective functions (e.g., weights, objective function values)
df_solve_time: The time it took to solve models
"""
# Re-run consolidate_data_for_analysis when new predictions or plans are added
consolidate_data_paths = {'dose': f'{cs.results_data_dir}/dose_error_df.csv',
'dvh': f'{cs.results_data_dir}/dvh_metric_df.csv',
'clinical_criteria': f'{cs.results_data_dir}/clinical_criteria_df.csv',
'ref_dvh': f'{cs.results_data_dir}/reference_metrics.csv',
'ref_clinical_criteria': f'{cs.results_data_dir}/reference_criteria.csv',
'weights': f'{cs.results_data_dir}/weights_df.csv',
'solve_time': f'{cs.results_data_dir}/solve_time_df.csv'
}
# Check if consolidated data already exists
no_consolidated_data = False
for p in consolidate_data_paths.values():
if not os.path.isfile(p):
print(p)
no_consolidated_data = True
os.makedirs(cs.results_data_dir, exist_ok=True) # Make dir for results
# Consolidate data if it doesn't exist yet or force flag is True
if no_consolidated_data or force_new_consolidate:
# Prepare strings for data that will be evaluated
predictions_to_optimize, prediction_names = get_predictions_to_optimize(cs)
patient_names = os.listdir(cs.reference_data_dir)
hold_out_plan_paths = get_paths(cs.reference_data_dir, ext='') # list of paths used for held out testing
# Evaluate dose metrics
patient_data_loader = DataLoader(hold_out_plan_paths, mode_name='evaluation') # Set data loader
dose_evaluator_sample = EvaluateDose(patient_data_loader)
# Make reference dose DVH metrics and clinical criteria
dose_evaluator_sample.make_metrics()
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_dose_metric_df').to_csv(
consolidate_data_paths['ref_dvh'])
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_criteria_df').to_csv(
consolidate_data_paths['ref_clinical_criteria'])
# Initialize DataFrames for all scores and errors
optimizer_names = os.listdir(cs.plans_dir) # Get names of all optimizers
dose_error_index_dict, dvh_metric_index_dict = make_error_and_metric_indices(patient_names,
dose_evaluator_sample,
optimizer_names)
df_dose_error_indices = pd.MultiIndex.from_product(**dose_error_index_dict)
df_dvh_error_indices = pd.MultiIndex.from_arrays(**dvh_metric_index_dict)
# Make DataFrames
df_dose_error =
|
pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
|
pandas.DataFrame
|
import os
import pandas as pd
import numpy as np
import pyddem.tdem_tools as tt
in_ext = '/home/atom/ongoing/work_worldwide/tables/table_man_gard_zemp_wout.csv'
df_ext = pd.read_csv(in_ext)
reg_dir = '/home/atom/ongoing/work_worldwide/vol/final'
fn_tarea = '/home/atom/data/inventory_products/RGI/tarea_zemp.csv'
list_fn_reg= [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg.csv') for i in [1,2,3,4,5,6,7,8,9,10,11,12,16,17,18,19]] + [os.path.join(reg_dir,'dh_13_14_15_rgi60_int_base_reg.csv')] + [os.path.join(reg_dir,'dh_01_02_rgi60_int_base_reg.csv')]
tlim_zemp = [np.datetime64('2006-01-01'),np.datetime64('2016-01-01')]
tlim_wouters = [np.datetime64('2002-01-01'),np.datetime64('2017-01-01')]
tlim_cira = [np.datetime64('2002-01-01'),np.datetime64('2020-01-01')]
tlim_gardner = [np.datetime64('2003-01-01'),np.datetime64('2010-01-01')]
# tlim_shean = [np.datetime64('2000-01-01'),np.datetime64('2018-01-01')]
# tlim_braun = [np.datetime64('2000-01-01'),np.datetime64('2013-01-01')]
list_tlim = [tlim_zemp,tlim_wouters,tlim_cira,tlim_gardner]
list_tag = ['hugonnet_2021_period_zemp','hugonnet_2021_period_wout','hugonnet_2021_period_cira','hugonnet_2021_period_gard']
list_df = []
for fn_reg in list_fn_reg:
df_reg = pd.read_csv(fn_reg)
df_agg = tt.aggregate_all_to_period(df_reg,list_tlim=list_tlim,fn_tarea=fn_tarea,frac_area=1,list_tag=list_tag)
list_df.append(df_agg)
df = pd.concat(list_df)
list_fn_reg_multann = [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg_subperiods.csv') for i in np.arange(1,20)]
df_all = pd.DataFrame()
for fn_reg_multann in list_fn_reg_multann:
df_all = df_all.append(pd.read_csv(fn_reg_multann))
tlims = [np.datetime64('20'+str(i).zfill(2)+'-01-01') for i in range(21)]
list_df_glob = []
list_df_per = []
for i in range(len(tlims)-1):
period = str(tlims[i])+'_'+str(tlims[i+1])
df_p = df_all[df_all.period==period]
df_global = tt.aggregate_indep_regions_rates(df_p)
df_global['period']=period
df_noperiph = tt.aggregate_indep_regions_rates(df_p[~df_p.reg.isin([5, 19])])
df_noperiph['period']=period
list_df_glob.append(df_global)
list_df_per.append(df_noperiph)
df_glob = pd.concat(list_df_glob)
df_per = pd.concat(list_df_per)
df_glob['reg']=23
df_per['reg']=22
df_glob['tag']='hugonnet_2021_yearly'
df_per['tag']='hugonnet_2021_yearly'
df = pd.concat([df,df_glob,df_per])
tlims = [np.datetime64('20'+str(5*i).zfill(2)+'-01-01') for i in range(5)]
list_df_glob = []
list_df_per = []
for i in range(len(tlims)-1):
period = str(tlims[i])+'_'+str(tlims[i+1])
df_p = df_all[df_all.period==period]
df_global = tt.aggregate_indep_regions_rates(df_p)
df_global['period']=period
df_noperiph = tt.aggregate_indep_regions_rates(df_p[~df_p.reg.isin([5, 19])])
df_noperiph['period']=period
list_df_glob.append(df_global)
list_df_per.append(df_noperiph)
df_glob = pd.concat(list_df_glob)
df_per = pd.concat(list_df_per)
df_glob['reg']=23
df_per['reg']=22
df_glob['tag']='hugonnet_2021_5year'
df_per['tag']='hugonnet_2021_5year'
df = pd.concat([df,df_glob,df_per])
df = df.drop(columns=['dhdt','err_dhdt','dvoldt','err_dvoldt','valid_obs','valid_obs_py','perc_area_meas','perc_area_res','area_nodata'])
#put all to 2-sigma level
df['err_dmdt'] *= 2
df['err_dmdtda'] *= 2
df_gar = df_ext[['reg','gar','gar_err']]
df_gar.columns = ['reg','dmdtda','err_dmdtda']
df_gar['tag']= 'gardner_2013'
df_gar['period'] = str(tlim_gardner[0])+'_'+str(tlim_gardner[1])
df_zemp = df_ext[['reg','zemp','zemp_err']]
df_zemp.columns = ['reg','dmdtda','err_dmdtda']
df_zemp['tag']= 'zemp_2019'
df_zemp['period'] = str(tlim_zemp[0])+'_'+str(tlim_zemp[1])
df_wout = df_ext[['reg','wout','wout_err']]
df_wout.columns = ['reg','dmdtda','err_dmdtda']
df_wout['tag']= 'wouters_2019'
df_wout['period'] = str(tlim_wouters[0])+'_'+str(tlim_wouters[1])
df_cir = df_ext[['reg','cira','cira_err']]
df_cir.columns = ['reg','dmdtda','err_dmdtda']
df_cir['tag']= 'ciraci_2020'
df_cir['period'] = str(tlim_cira[0])+'_'+str(tlim_cira[1])
df =
|
pd.concat([df,df_gar,df_zemp,df_wout,df_cir])
|
pandas.concat
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helper functions for loading files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import pandas as pd
def filename(env_name, noops, dev_measure, dev_fun, baseline, beta,
value_discount, seed, path='', suffix=''):
"""Generate filename for the given set of parameters."""
noop_str = 'noops' if noops else 'nonoops'
seed_str = '_' + str(seed) if seed else ''
filename_template = ('{env_name}_{noop_str}_{dev_measure}_{dev_fun}' +
'_{baseline}_beta_{beta}_vd_{value_discount}' +
'{suffix}{seed_str}.csv')
full_path = os.path.join(path, filename_template.format(
env_name=env_name, noop_str=noop_str, dev_measure=dev_measure,
dev_fun=dev_fun, baseline=baseline, beta=beta,
value_discount=value_discount, suffix=suffix, seed_str=seed_str))
return full_path
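# Hedged example with hypothetical argument values, tracing the template above:
#   filename('box', True, 'rel_reach', 'truncation', 'start', 0.1, 0.99, 1, path='results')
#   -> 'results/box_noops_rel_reach_truncation_start_beta_0.1_vd_0.99_1.csv'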
def load_files(baseline, dev_measure, dev_fun, value_discount, beta, env_name,
noops, path, suffix, seed_list, final=True):
"""Load result files generated by run_experiment with the given parameters."""
def try_loading(f, final):
if os.path.isfile(f):
df =
|
pd.read_csv(f, index_col=0)
|
pandas.read_csv
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
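# Example (hypothetical timestamp): getDateParser()("2020-06-09 19:14:00.000")
# returns datetime.datetime(2020, 6, 9, 19, 14), matching the default format string above.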
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean via a weighted-average (multiplication) method, since direct division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
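# Minimal worked example of the weighted-average trick (hypothetical data):
#   numpy.average([2.0, 4.0, 6.0], weights=numpy.ones(3) / 3) -> 4.0
# i.e. each element is multiplied by 1/n instead of dividing the sum by n.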
def _calculateStd(self, data):
"""
Calculates the standard deviation via a multiplication method, since direct division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0) # accumulator for the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
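# Worked example (hypothetical data, accumulator starting at zero): for [2.0, 4.0, 6.0]
# the mean is 4.0, the squared deviations sum to 8.0, and sqrt(8.0 / (3 - 1)) = 2.0,
# which matches numpy.std([2.0, 4.0, 6.0], ddof=1).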
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
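# Worked example (hypothetical values): with meanValue 10.0, sigmaValue 2.0 and the
# default multiplierSigma of 3.0, sigmaRangeValue is 6.0 and topValue is 16.0.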
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from the server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite":
|
pandas.StringDtype()
|
pandas.StringDtype
|
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(
|
StringIO(data)
|
pandas.compat.StringIO
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import random
import string
import numpy as np
import pandas as pd
import pytest
import snowflake.sqlalchemy
import sqlalchemy
from sqlalchemy import Column, ForeignKey, Integer, MetaData, Sequence, String, Table
def _create_users_addresses_tables(engine_testaccount, metadata):
users = Table('users', metadata,
Column('id', Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name', String),
Column('fullname', String),
)
addresses = Table('addresses', metadata,
Column('id', Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine_testaccount)
return users, addresses
def _create_users_addresses_tables_without_sequence(engine_testaccount,
metadata):
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('fullname', String),
)
addresses = Table('addresses', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine_testaccount)
return users, addresses
def test_a_simple_read_sql(engine_testaccount):
metadata = MetaData()
users, addresses = _create_users_addresses_tables(
engine_testaccount, metadata)
try:
# inserts data with an implicitly generated id
ins = users.insert().values(name='jack', fullname='<NAME>')
results = engine_testaccount.execute(ins)
assert results.inserted_primary_key == [1], 'sequence value'
results.close()
# inserts data with the given id
conn = engine_testaccount.connect()
ins = users.insert()
conn.execute(ins, id=2, name='wendy', fullname='<NAME>')
df = pd.read_sql("SELECT * FROM users WHERE name =%(name)s",
params={'name': 'jack'}, con=engine_testaccount)
assert len(df.values) == 1
assert df.values[0][0] == 1
assert df.values[0][1] == 'jack'
assert hasattr(df, 'id')
assert hasattr(df, 'name')
assert hasattr(df, 'fullname')
finally:
# drop tables
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def get_engine_with_numpy(db_parameters, user=None, password=None,
account=None):
"""
Creates a connection using the parameters defined in JDBC connect string
"""
from sqlalchemy import create_engine
from snowflake.sqlalchemy import URL
if user is not None:
db_parameters['user'] = user
if password is not None:
db_parameters['password'] = password
if account is not None:
db_parameters['account'] = account
from sqlalchemy.pool import NullPool
engine = create_engine(URL(
user=db_parameters['user'],
password=db_parameters['password'],
host=db_parameters['host'],
port=db_parameters['port'],
database=db_parameters['database'],
schema=db_parameters['schema'],
account=db_parameters['account'],
protocol=db_parameters['protocol'],
numpy=True,
), poolclass=NullPool)
return engine
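# Hedged usage sketch with placeholder credentials (db_parameters normally comes from the
# test fixtures):
#   engine = get_engine_with_numpy({'user': 'u', 'password': 'p', 'account': 'acct',
#                                   'host': 'h', 'port': 443, 'database': 'db',
#                                   'schema': 'public', 'protocol': 'https'})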
def test_numpy_datatypes(db_parameters):
engine = get_engine_with_numpy(db_parameters)
try:
specific_date = np.datetime64('2016-03-04T12:03:05.123456789')
engine.execute(
"CREATE OR REPLACE TABLE {name}("
"c1 timestamp_ntz)".format(name=db_parameters['name']))
engine.execute(
"INSERT INTO {name}(c1) values(%s)".format(
name=db_parameters['name']), (specific_date,)
)
df = pd.read_sql_query(
"SELECT * FROM {name}".format(
name=db_parameters['name']
), engine
)
assert df.c1.values[0] == specific_date
finally:
engine.execute(
"DROP TABLE IF EXISTS {name}".format(name=db_parameters['name'])
)
engine.dispose()
def test_to_sql(db_parameters):
engine = get_engine_with_numpy(db_parameters)
total_rows = 10000
engine.execute("""
create or replace table src(c1 float)
as select random(123) from table(generator(timelimit=>1))
limit {0}
""".format(total_rows))
engine.execute("""
create or replace table dst(c1 float)
""")
tbl = pd.read_sql_query(
'select * from src', engine)
tbl.to_sql('dst', engine, if_exists='append', chunksize=1000, index=False)
df = pd.read_sql_query(
'select count(*) as cnt from dst', engine
)
assert df.cnt.values[0] == total_rows
def test_no_indexes(engine_testaccount, db_parameters):
conn = engine_testaccount.connect()
data = pd.DataFrame([('1.0.0',), ('1.0.1',)])
with pytest.raises(NotImplementedError) as exc:
data.to_sql('versions', schema=db_parameters['schema'], index=True, index_label='col1', con=conn, if_exists='replace')
assert str(exc.value) == "Snowflake does not support indexes"
def test_timezone(db_parameters):
test_table_name = ''.join([random.choice(string.ascii_letters) for _ in range(5)])
sa_engine = sqlalchemy.create_engine(snowflake.sqlalchemy.URL(
account=db_parameters['account'],
password=db_parameters['password'],
database=db_parameters['database'],
port=db_parameters['port'],
user=db_parameters['user'],
host=db_parameters['host'],
protocol=db_parameters['protocol'],
schema=db_parameters['schema'],
numpy=True,
))
sa_engine2 = sqlalchemy.create_engine(snowflake.sqlalchemy.URL(
account=db_parameters['account'],
password=db_parameters['password'],
database=db_parameters['database'],
port=db_parameters['port'],
user=db_parameters['user'],
host=db_parameters['host'],
protocol=db_parameters['protocol'],
schema=db_parameters['schema'],
timezone='America/Los_Angeles',
numpy='')).raw_connection()
sa_engine.execute("""
CREATE OR REPLACE TABLE {table}(
tz_col timestamp_tz,
ntz_col timestamp_ntz,
decimal_col decimal(10,1),
float_col float
);""".format(table=test_table_name))
try:
sa_engine.execute("""
INSERT INTO {table}
SELECT
current_timestamp(),
current_timestamp()::timestamp_ntz,
to_decimal(.1, 10, 1),
.10;""".format(table=test_table_name))
qry = """
SELECT
tz_col,
ntz_col,
CONVERT_TIMEZONE('America/Los_Angeles', tz_col) AS tz_col_converted,
CONVERT_TIMEZONE('America/Los_Angeles', ntz_col) AS ntz_col_converted,
decimal_col,
float_col
FROM {table};""".format(table=test_table_name)
result = pd.read_sql_query(qry, sa_engine)
result2 = pd.read_sql_query(qry, sa_engine2)
# Check sqlalchemy engine result
assert(pd.api.types.is_datetime64tz_dtype(result.tz_col))
assert(not pd.api.types.is_datetime64tz_dtype(result.ntz_col))
assert(pd.api.types.is_datetime64tz_dtype(result.tz_col_converted))
assert(pd.api.types.is_datetime64tz_dtype(result.ntz_col_converted))
assert(np.issubdtype(result.decimal_col, np.float64))
assert(np.issubdtype(result.float_col, np.float64))
# Check sqlalchemy raw connection result
assert(
|
pd.api.types.is_datetime64tz_dtype(result2.TZ_COL)
|
pandas.api.types.is_datetime64tz_dtype
|
import os
import sys
from pprint import pprint
from altair.vegalite.v4.schema.core import UtcSingleTimeUnit
import ccxt
#import ccxt.async_support as ccxt
from pyti.exponential_moving_average import exponential_moving_average as ema
import pandas as pd
import datetime
import time
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from IPython.display import clear_output
import numpy as np
import datetime as dt
import pytz
# import mplcursors
import streamlit as st
# from compute2d import compute_2d_histogram
import numpy as np
import pandas as pd
import altair as at
from copy import copy
import plotly.graph_objects as go
# from paracoords import create_paracoords
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import plotly
import plotly.graph_objs as go
import warnings
warnings.filterwarnings('ignore')
# root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append(root + '/python')
# print('CCXT Version:', ccxt.__version__)
def get_last_n_kline_closes(n=50,interval='1h',symbol='BTCUSDT',exchange=None):
if exchange is None:
print('Exchange not initiated')
return None
# exchange = ccxt.binance({
# 'apiKey': g_api_key,
# 'secret': g_secret_key,
# 'enableRateLimit': True,
# 'options': {
# 'defaultType': 'future',
# },
# 'hedgeMode':True
# })
# # symbol = 'BTC/USDT'
# market = exchange.load_markets()
closes = [[dt.datetime.utcfromtimestamp(float(elem[0]) / 1000.),elem[4]] for elem in exchange.fapiPublic_get_klines({'symbol':symbol,'interval':interval})][-n:-1]
dates = [elem[0] for elem in closes]
values = [float(elem[1]) for elem in closes]
df = pd.DataFrame([(elem[0],elem[1]) for elem in zip(dates,values)],columns=['datetime','closes'])
return df
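# Hedged usage sketch (assumes an already-initialised ccxt.binance futures client):
#   df = get_last_n_kline_closes(n=100, interval='4h', symbol='ETHUSDT', exchange=exchange)
# returns a DataFrame with 'datetime' and 'closes' columns for the most recent klines.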
def generate_signal(candles=50,interval='1h',symbol='BTCUSDT',strat='ema_cross_over_under',strat_params={'fast_ema':10,'slow_ema':40},exchange=None):
if exchange is None:
return 'Exchange not Initiated'
allowed_strats = ['ema_diff_peak_trough','ema_cross_over_under','ema_oscillator_peak_trough']
if strat not in allowed_strats:
print('INVALID STRATEGY')
return "NONE"
if strat == 'ema_oscillator_peak_trough':
'''under development'''
return "NONE"
if strat == 'ema_diff_peak_trough':
candles = strat_params['slow_ema'] + 10
current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
min_input_length = np.max([float(strat_params['fast_ema']),float(strat_params['slow_ema'])])
if len(list(current_input['closes'].values))<min_input_length:
return "INPUT HAS TOO FEW ELEMENTS"
closes = current_input['closes'].astype(float)
# closes = closes[:-1]
# closes['close'] = closes['close'].astype(float)
ema_diff = pd.DataFrame(ema(closes.tolist(),strat_params['fast_ema']) - ema(closes.tolist(),strat_params['slow_ema']),columns=['ema_diff'])
p = strat_params['slow_ema']+1
closes = closes[p:].reset_index(drop=True)
ema_diff = ema_diff[p:].reset_index(drop=True)
last = ema_diff.values[-1]
second_last = ema_diff.values[-2]
third_last = ema_diff.values[-3]
# short if local peak
if last < second_last and third_last < second_last:
return 'SHORT'
# long if local trough
if last > second_last and third_last > second_last:
return 'LONG'
return "NONE"
if strat == 'ema_cross_over_under':
candles = strat_params['slow_ema'] + 10
current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
min_input_length = np.max([float(strat_params['fast_ema']),float(strat_params['slow_ema'])])
if len(list(current_input['closes'].values))<min_input_length:
return "INPUT HAS TOO FEW ELEMENTS"
closes = current_input['closes'].astype(float)
# closes = closes[:-1]
# closes['close'] = closes['close'].astype(float)
ema_diff = pd.DataFrame(ema(closes.tolist(),strat_params['fast_ema']) - ema(closes.tolist(),strat_params['slow_ema']),columns=['ema_diff'])
p = strat_params['slow_ema']+1
closes = closes[p:].reset_index(drop=True)
ema_diff = ema_diff[p:].reset_index(drop=True)
last = ema_diff.values[-1]
second_last = ema_diff.values[-2]
third_last = ema_diff.values[-3]
# long if diff cross over 0
if last > 0 and second_last < 0:
return "LONG"
# short if diff cross under 0
if last < 0 and second_last > 0:
return "SHORT"
return "NONE"
def get_open_positions(mode='live',exchange=None):
if exchange is None:
return 'Exchange Not Initiated'
allowed_modes = ['live','paper']
if mode not in allowed_modes:
return "INVALID MODE"
if mode == 'live':
# exchange = ccxt.binance({
# 'apiKey': g_api_key,
# 'secret': g_secret_key,
# 'enableRateLimit': True,
# 'options': {
# 'defaultType': 'future',
# },
# 'hedgeMode':True
# })
# markets = exchange.load_markets()
#exchange.verbose=True
# market = exchange.market(symbol)
positions = [elem for elem in exchange.fapiPrivate_get_positionrisk() if float(elem['positionAmt'])!=0]
if len(positions)==0:
return 0
return positions
if mode == 'paper':
paper_trades = pd.read_csv('paper_trades.csv',index_col=0)
if paper_trades.position_type.iloc[-1] == '-':
return 0
return paper_trades.iloc[-1]
def get_balance(mode='live',asset='USDT',exchange=None):
if exchange is None:
return 'Exchange not initiated'
allowed_modes = ['paper','live']
if mode not in allowed_modes:
print("INVALID MODE")
return None
# exchange = ccxt.binance({
# 'apiKey': g_api_key,
# 'secret': g_secret_key,
# 'enableRateLimit': True,
# 'options': {
# 'defaultType': 'future',
# },
# 'hedgeMode':True
# })
# markets = exchange.load_markets()
if mode == 'live':
live_balance = str(float([float(elem['balance']) for elem in exchange.fapiPrivate_get_balance() if elem['asset']==asset][0]))
unrealized_pnl = str(sum([float(elem['unRealizedProfit']) for elem in exchange.fapiPrivateV2_get_positionrisk() if float(elem['positionAmt']) >0]))
unrealized_pnl_percent = str(float(unrealized_pnl)/float(live_balance))
balance = {'wallet_balance': live_balance,
'unrealized_pnl_percent': unrealized_pnl_percent}
return balance
if mode == 'paper':
paper_trades = pd.read_csv('paper_trades.csv',index_col=0)
if paper_trades.paper_equity.iloc[-1] == '-':
paper_balance = paper_trades.paper_equity.iloc[-2]
else:
paper_balance = paper_trades.paper_equity.iloc[-1]
if paper_trades.entry_price.iloc[-1] == '-':
entry = None
else:
entry = paper_trades.entry_price.iloc[-1]
if paper_trades.position_type.iloc[-1] == 'LONG':
position = 1
elif paper_trades.position_type.iloc[-1] == 'SHORT':
position = -1
else:
position = 0
if entry is not None and position != 0:
if paper_trades.exit_price.iloc[-1] == '-':
symbol = paper_trades.market_name.iloc[-1]
last_price = float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
pnl = (last_price-float(entry))/float(entry)*100*float(position)
else:
pnl = 0
else:
pnl = 0
balance = {'wallet_balance':paper_balance,'unrealized_pnl_percent':pnl}
return balance
def close_all_open_positions(mode='live',exchange=None):
if exchange is None:
return 'Exchange not Initiated'
allowed_modes = ['paper','live']
if mode not in allowed_modes:
return "INVALID MODE"
if mode == 'live':
# exchange = ccxt.binance({
# 'apiKey': g_api_key,
# 'secret': g_secret_key,
# 'enableRateLimit': True,
# 'options': {
# 'defaultType': 'future',
# },
# 'hedgeMode':True
# })
# markets = exchange.load_markets()
#exchange.verbose=True
# market = exchange.market(symbol)
open_positions = get_open_positions(mode=mode,exchange=exchange)
if open_positions == 0:
return None
if np.sign(float(open_positions[0]['positionAmt'])) == -1.0:
opp_side = "BUY"
if np.sign(float(open_positions[0]['positionAmt'])) == 1.0:
opp_side = "SELL"
baseqty= abs(float(open_positions[0]['positionAmt']))
symbol = open_positions[0]['symbol']
positionSide = open_positions[0]['positionSide']
order = exchange.fapiPrivatePostOrder({'symbol':symbol, 'type':"MARKET", 'side':opp_side,'positionSide':positionSide ,'quantity':baseqty})
return order
if mode == 'paper':
paper_trades = pd.read_csv('paper_trades.csv',index_col=0)
if paper_trades.position_type.iloc[-1] == '-':
return None
if paper_trades.exit_price.iloc[-1] != '-':
return None
# exchange = ccxt.binance({
# 'apiKey': g_api_key,
# 'secret': g_secret_key,
# 'enableRateLimit': True,
# 'options': {
# 'defaultType': 'future',
# },
# 'hedgeMode':True
# })
# markets = exchange.load_markets()
symbol = paper_trades.market_name.iloc[-1]
entry_price = paper_trades.entry_price.iloc[-1]
leverage= paper_trades.leverage.iloc[-1]
leverage = int(leverage)
if paper_trades.position_type.iloc[-1] == 'LONG':
position = 1
exit_price = 0.999*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
exit_time = datetime.datetime.utcnow()
if paper_trades.position_type.iloc[-1] == 'SHORT':
position = -1
exit_price = 1.001*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
exit_time = datetime.datetime.utcnow()
trade_pnl_pct = float(position)*float(leverage)*(float(exit_price)-float(entry_price))/float(entry_price)*100
balance = float(paper_trades.paper_equity.iloc[-2])*(1+trade_pnl_pct/100)
paper_trades.exit_time.iloc[-1] = exit_time
paper_trades.exit_price.iloc[-1] = exit_price
paper_trades.trade_pnl_pct.iloc[-1] = trade_pnl_pct
paper_trades.paper_equity.iloc[-1] = balance
paper_trades.to_csv('paper_trades.csv')
trade = paper_trades.iloc[-1]
return trade
def open_market_order(mode='live',balance=1,symbol='BTCUSDT',leverage="5",side="BUY",hedge_mode="BOTH",exchange=None):
if exchange is None:
return 'Exchange not initiated'
allowed_modes = ['paper','live']
if mode not in allowed_modes:
return "INVALID MODE"
if mode == 'live':
closed_position = close_all_open_positions(mode=mode,exchange=exchange)
quoteqty = float([elem for elem in exchange.fapiPrivate_get_balance({'asset':"USDT"}) if elem['asset']=='USDT'][0]['balance']) * balance
price = float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
baseqty = "{:.3f}".format(quoteqty*float(leverage)/price)
baseqty = float(baseqty)-float([elem for elem in exchange.fapiPublic_get_exchangeinfo()['symbols'] if elem['symbol']==symbol][0]['filters'][2]['minQty'])
baseqty = "{:.3f}".format(baseqty)
baseqty = str(baseqty)
lev_req = exchange.fapiPrivate_post_leverage({'symbol':symbol,'leverage':leverage})
order = exchange.fapiPrivatePostOrder({'symbol':symbol, 'type':"MARKET", 'side':side,'positionSide':hedge_mode ,'quantity':baseqty})
return order,closed_position
if mode == 'paper':
closed_position = close_all_open_positions(mode=mode,exchange=exchange)
paper_trades = pd.read_csv('paper_trades.csv',index_col=0)
if side == 'BUY':
position_type = 'LONG'
entry_price = 1.001*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
entry_time = datetime.datetime.utcnow()
if side == 'SELL':
position_type = 'SHORT'
entry_price = 0.999*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
entry_time = datetime.datetime.utcnow()
trade = pd.DataFrame([[symbol,position_type,entry_time,'-', entry_price,'-',leverage,'-','-']],columns=['market_name','position_type','entry_time','exit_time','entry_price','exit_price','leverage','trade_pnl_pct','paper_equity'])
paper_trades = paper_trades.append(trade,ignore_index=True)
paper_trades.to_csv('paper_trades.csv')
return paper_trades.iloc[-1],closed_position
# def get_strat_performance(strat='ema_cross_over_under',leverage=1,strat_params={'fast_ema':4,'slow_ema':20},interval='4h',symbol='BTCUSDT',candles=50,input=None,exchange=None):
# if exchange is None:
# return 'Exchange not initiated'
# allowed_strats = ['ema_cross_over_under','ema_diff_peak_trough']
# if strat not in allowed_strats:
# print("INVALID STRATEGY")
# return None
# if input == None:
# current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
# else:
# current_input = input
# closes = pd.DataFrame(current_input,columns=['close'])
# closes = closes[:-1]
# closes['close'] = closes['close'].astype(float)
# ema_diff = pd.DataFrame(ema(closes['close'].tolist(),strat_params['fast_ema']) - ema(closes['close'].tolist(),strat_params['slow_ema']),columns=['ema_diff'])
# p = strat_params['slow_ema']+1
# closes = closes[p:].reset_index(drop=True)
# ema_diff = ema_diff[p:].reset_index(drop=True)
# if strat == 'ema_cross_over_under':
# signal = [0]+[1 if float(ema_diff.loc[index]) > 0 and float(ema_diff.loc[index-1]) < 0 else -1 if float(ema_diff.loc[index]) < 0 and float(ema_diff.loc[index-1]) > 0 else 0 for index in ema_diff.index[1:]]
# if strat == 'ema_diff_peak_trough':
# signal = [0,0]+ [-1 if float(ema_diff.loc[index]) < float(ema_diff.loc[index-1]) and float(ema_diff.loc[index-1]) > float(ema_diff.loc[index-2]) else 1 if float(ema_diff.loc[index]) > float(ema_diff.loc[index-1]) and float(ema_diff.loc[index-1]) < float(ema_diff.loc[index-2]) else 0 for index in ema_diff.index[2:]]
# trades = list()
# for idx in range(len(signal)):
# if signal[idx] != 0:
# trades.append([closes.loc[idx],signal[idx]])
# result = list()
# for idx in range(len(trades)):
# if idx > 0:
# position = trades[idx-1][1] * leverage
# performance = position * ((trades[idx][0]['close'] - trades[idx-1][0]['close']) / trades[idx-1][0]['close'])*100
# trade = [position,performance]
# result.append(trade)
# equity_curve = list()
# principal = 1
# for elem in result:
# principal = principal * (1 + elem[1]/100)
# # print(principal)
# equity_curve.append(principal)
# pd.DataFrame(equity_curve).plot()
# trade_pnl = list()
# principal = 1
# for elem in result:
# pnl=elem[1]
# # print(principal)
# trade_pnl.append(pnl)
# pd.DataFrame(trade_pnl).plot(kind='bar')
def get_strat_price_ti_plot(strat='ema_cross_over_under',strat_params={'fast_ema':4,'slow_ema':20},symbol='DEFIUSDT',leverage=1,decay=0.995,interval='1d',candles=50,exchange=None,animate=False,data=None):
if exchange is None:
return 'Exchange not initiated'
allowed_strats = ['ema_cross_over_under','ema_diff_peak_trough','ema_oscillator_peak_trough']
if strat not in allowed_strats:
print("INVALID STRATEGY")
return None
if strat == 'ema_oscillator_peak_trough':
        '''in development'''
# current_input=list(get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)['closes'].values)
# closes = pd.DataFrame(current_input,columns=['close'])
# # closes = closes[:-1]
# closes['close'] = closes['close'].astype(float)
# ema_oscillator = list()
return None
if data is None:
current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
if data == 'complete':
current_input = pd.read_parquet('C:\\Users\\ZankarSS\\Downloads\\BTC-USDT.parquet')['close'].astype(float).values
current_input = pd.read_csv('Binance_BTCUSDT_d.csv').iloc[::-1][['date','close']]
current_input['datetime'] = [pd.Timestamp(elem) for elem in current_input['date'].values]
current_input['closes'] = [np.float64(elem) for elem in current_input['close'].values]
del current_input['date']
del current_input['close']
current_input.reset_index(drop=True)
# dates = True
if strat == 'ema_cross_over_under':
min_input_length = np.max([float(strat_params['fast_ema']),float(strat_params['slow_ema'])])
if len(list(current_input['closes'].values))<min_input_length:
return "INPUT HAS TOO FEW ELEMENTS"
closes = current_input['closes'].astype(float)
datetimes = current_input['datetime']
# closes = closes[:-1]
# closes['close'] = closes['close'].astype(float)
indicator = pd.DataFrame(ema(closes.tolist(),strat_params['fast_ema']) - ema(closes.tolist(),strat_params['slow_ema']),columns=['ema_diff'])
p = strat_params['slow_ema']+1
closes = closes[p:].reset_index(drop=True)
indicator = indicator[p:].reset_index(drop=True)
datetimes = datetimes[p:].reset_index(drop=True)
signal = [0] + [1 if float(indicator.loc[index]) > 0 and float(indicator.loc[index-1]) < 0 else -1 if float(indicator.loc[index]) < 0 and float(indicator.loc[index-1]) > 0 else 0 for index in indicator.index[1:]]
if strat == 'ema_diff_peak_trough':
# current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
min_input_length = np.max([float(strat_params['fast_ema']),float(strat_params['slow_ema'])])
if len(list(current_input['closes'].values))<min_input_length:
return "INPUT HAS TOO FEW ELEMENTS"
closes = current_input['closes'].astype(float)
datetimes = current_input['datetime']
# closes = closes[:-1]
# closes['close'] = closes['close'].astype(float)
indicator = pd.DataFrame(ema(closes.tolist(),strat_params['fast_ema']) - ema(closes.tolist(),strat_params['slow_ema']),columns=['ema_diff'])
p = strat_params['slow_ema']+1
closes = closes[p:].reset_index(drop=True)
indicator = indicator[p:].reset_index(drop=True)
datetimes = datetimes[p:].reset_index(drop=True)
signal = [0,0]+ [-1 if float(indicator.loc[index]) < float(indicator.loc[index-1]) and float(indicator.loc[index-1]) > float(indicator.loc[index-2]) else 1 if float(indicator.loc[index]) > float(indicator.loc[index-1]) and float(indicator.loc[index-1]) < float(indicator.loc[index-2]) else 0 for index in indicator.index[2:]]
navs = list()
current_position = 0
current_nav = 1
current_position_entry = 0
cumulative_nav = 1
for idx in indicator.index[2:]:
# if idx == 0 or idx == 1:
# navs.append(current_nav)
# continue
if current_position == 1:
current_position_entry = current_position_entry * float(1/float(decay))
current_nav = (float(closes[idx]) / float(current_position_entry)) * float(cumulative_nav)
navs.append(current_nav)
if current_position == -1:
current_position_entry = current_position_entry * float(decay)
current_nav = (1 + ((float(current_position_entry) - float(closes[idx])) / float(current_position_entry))) * float(cumulative_nav)
navs.append(current_nav)
if current_position == 0:
navs.append(current_nav)
if signal[idx] != current_position and signal[idx] != 0:
current_position = signal[idx]
current_position_entry = closes[idx]
cumulative_nav = current_nav
navs = [1,1] + navs
# for elem in zip(closes.values,signal):
# if elem[1] == 0:
# if last_position == 0:
# last_nav =
# navs.append(last_nav)
# if last_position != 0:
# last_nav =
# navs.append(last_nav)
# if elem[1] == 1:
# last_position =1
# last_nav =
# navs.append(last_nav)
# if elem[1] == -1:
# last_position = -1
# last_nav =
# navs.append(last_nav)
dynamic_closes = list()
dynamic_signal = list()
dynamic_indicator = list()
dynamic_dates = list()
dynamic_nav = list()
assert len(closes) == len(signal) == len(indicator) == len(datetimes) == len(navs)
for elem in zip(closes.values,signal,indicator.values,datetimes.values,navs):
dynamic_closes.append(elem[0])
dynamic_signal.append(elem[1])
dynamic_indicator.append(elem[2])
dynamic_dates.append(elem[3])
dynamic_nav.append(elem[4])
clear_output(wait=True)
if animate is False and elem != [elem for elem in zip(closes.values,signal,indicator.values,datetimes.values,navs)][-1]:
continue
fig, (ax1,ax2,ax3,ax4) = plt.subplots(nrows=4, sharex=True, subplot_kw=dict(frameon=True),figsize=(20,20)) # frameon=False removes frames
# x = range(len(dynamic_signal))
# plt.subplots_adjust(hspace=.0)
ax1.grid()
ax1.plot(dynamic_dates, dynamic_closes, color='green',linewidth=2)
for i in range(len(dynamic_signal)):
if dynamic_signal[i] == 1:
ax1.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='g')
if dynamic_signal[i] == -1:
ax1.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='r')
# closes.plot()
ax1.axhline(dynamic_closes[-1],color='k')
ax1.legend([str(symbol)+': '+ str(float(dynamic_closes[-1]))] )
# ax1.set_xlabel('Days')
ax1.set_ylabel('Price')
ax2.grid()
ax2.plot(dynamic_dates, dynamic_indicator, color='lightgreen', linestyle='--',linewidth=2)
# indicator.plot()
for i in range(len(dynamic_signal)):
if dynamic_signal[i] == 1:
ax2.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='g')
if dynamic_signal[i] == -1:
ax2.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='r')
ax2.axhline(0,color='k',linestyle='--')
ax2.legend([float(dynamic_indicator[-1])])
ax2.set_ylabel('Trend Indicator')
ax3.grid()
ax3.plot(dynamic_dates, dynamic_nav, color='darkmagenta',linewidth=2)
# leg_strat = ax3.legend(strat_plot,'Strategy: '+ str(dynamic_nav[-1]))
# bh = [dynamic_closes[idx]/dynamic_closes[0] for idx in range(len(dynamic_closes))]
# buy_hold_plot = ax3.plot(dynamic_dates, bh, color='y',linewidth=2)
# leg_bh = ax3.legend(buy_hold_plot,'Strategy: '+ str(bh[-1]))
for i in range(len(dynamic_signal)):
if dynamic_signal[i] == 1:
ax3.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='g')
if dynamic_signal[i] == -1:
ax3.axvline(pd.DataFrame(dynamic_dates).iloc[
|
pd.DataFrame(dynamic_dates)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 29 21:55:02 2021
@author: dariu
"""
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
#import sklearn.cluster
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
path = "C:\\Users\dariu\\Documents\\Master Wirtschaftsinformatik\\Data Challenges\Data\\"
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training_setB/', 'p1']
]
#%%
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
# patient_gender = df_temp["Gender"][1]
# if df_temp["Age"][1] >= 40:
dfs.append(df_temp)
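        # each file holds one patient's record in pipe-separated (PSV) format;
        # the collected frames are concatenated into a single DataFrame below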
df =
|
pd.concat(dfs)
|
pandas.concat
|
##########
# Built-in
##########
import glob
import logging
from pathlib import Path
from typing import Dict
########
# Libs #
########
import pandas as pd
logger = logging.getLogger(__name__)
class CustomPreprocessor():
def __init__(self, cfg: object):
"""Custom preprocessor for malaria datasets at
https://github.com/rfordatascience/tidytuesday/tree/master/data/2018/2018-11-13.
:param cfg: python configuration file imported as a module
:type cfg: module object
"""
# attributes from python configuration file
self.folder_path = getattr(cfg, "DATA_FOLDER_RELATIVE_PATH", None)
self.rename_dict = getattr(cfg, "RENAME_DICT", None)
self.uk_list = getattr(cfg, "UK_LIST", None)
self.income_demo = getattr(cfg, "INCOME_DEMO", None)
##################
# Helper functions
##################
def get_df_list(self, folder_path: str) -> list:
"""Get a list of csv filepaths to load as DataFrames
:param folder_path: relative file path to raw data folder containing csv files
:type folder_path: str
:return: a list of absolute filepaths to load as DataFrames
:rtype: list
"""
full_folder_path = Path(__file__).parents[3] / folder_path
df_list = [
file
for file in glob.glob(
f'{full_folder_path}/*.csv'
)
]
return df_list
def get_entity_type(
self,
code: str,
entity: str,
income_demo: list
)-> str:
"""Get entity type value for each row based on multiple conditions
:param code: 'code' column value in the dataframe indicating country code for that row
:type code: str
:param entity: 'entity' column value in the dataframe indicating entity for that row
:type entity: str
:param income_demo: list of entities to be categorized under 'Income/Demographic' entity_type
:type income_demo: list
:return: value for entity_type for that row
:rtype: str
"""
if entity == 'World':
entity_type = 'World'
elif not
|
pd.isnull(code)
|
pandas.isnull
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 18:24:12 2020
@author: omar.elfarouk
"""
import pandas
import numpy
import seaborn
import scipy
import matplotlib.pyplot as plt
data =
|
pandas.read_csv('gapminder.csv', low_memory=False)
|
pandas.read_csv
|
# Task2 Get market data from Binance
### Required
#### 1. **Use Binance Python SDK** to get public data
# import libraries
import time
import dateparser
import pytz
import json
import pandas as pd
from datetime import datetime
from binance.client import Client
# write functions to convert time and interval
def date_to_milliseconds(date_str):
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
d = dateparser.parse(date_str)
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
d = d.replace(tzinfo=pytz.utc)
return int((d - epoch).total_seconds() * 1000.0)
def interval_to_milliseconds(interval):
ms = None
seconds_per_unit = {
"m": 60,
"h": 60 * 60,
"d": 24 * 60 * 60,
"w": 7 * 24 * 60 * 60
}
unit = interval[-1]
if unit in seconds_per_unit:
try:
ms = int(interval[:-1]) * seconds_per_unit[unit] * 1000
except ValueError:
pass
return ms
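# Quick sanity checks for the interval helper above (added for illustration only):
# "1h" should map to 3,600,000 ms and "1d" to 86,400,000 ms.
assert interval_to_milliseconds("1h") == 60 * 60 * 1000
assert interval_to_milliseconds("1d") == 24 * 60 * 60 * 1000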
# write function to get candle/klines data
def get_historical_klines(symbol, interval, start_str, end_str=None):
client = Client("", "")
output_data = []
limit = 500
timeframe = interval_to_milliseconds(interval)
start_ts = date_to_milliseconds(start_str)
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
symbol_existed = False
while True:
temp_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
output_data = output_data + temp_data
start_ts = temp_data[len(temp_data) - 1][0] + timeframe
else:
start_ts = start_ts + timeframe
idx = idx + 1
if len(temp_data) < limit:
break
if idx % 3 == 0:
time.sleep(1)
return output_data
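# Example call (illustrative only; requires network access and a valid symbol):
# klines = get_historical_klines("BTCUSDT", "1h", "1 Dec 2020", "1 Jan 2021")
# Each raw kline is a list: [open time, open, high, low, close, volume, close time,
# quote asset volume, number of trades, taker buy base/quote volume, ignore],
# which format_klines below turns into a labelled DataFrame.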
# write function to format candle/klines data
def format_klines(df):
formatted_klines = pd.DataFrame(df, columns=['Open time', 'Open', 'High', 'Low', 'Close', 'Volume',
'Close time', 'Quote asset volume', 'Number of trades', 'Taker buy base asset volume',
'Taker buy quote asset volume', 'Ignore'], index=None)
formatted_klines = formatted_klines.drop(['Ignore'], axis=1)
formatted_klines['Open time'] = pd.to_datetime(formatted_klines['Open time'], unit='ms')
formatted_klines['Close time'] =
|
pd.to_datetime(formatted_klines['Close time'], unit='ms')
|
pandas.to_datetime
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#----------Here I applied the algorithms which need scaling, with 81 and 20 features-------------------
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data=pd.read_csv('Phishing.csv')
column_names=list(data.columns)
data['URL_Type_obf_Type'].value_counts()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].value_counts()
#shuffling the dataframe
shuffled_dataset=data.sample(frac=1).reset_index(drop=True)
#dropping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.drop(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replace([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillna(shuffled_dataset.mean(),inplace=True) #handling the na value
#checking if any value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.isnull().any(axis=0)
inf_result=np.isinf(shuffled_dataset.select_dtypes(include=[np.number])).any(axis=0)
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.drop(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=pd.DataFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=pd.concat([shuffled_dataset_scaled,shuffled_y],axis=1)
dataset_final.drop(['ISIpAddressInDomainName'],inplace=True,axis=1) #dropping this column since it always contains zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlength']
# reduced_features.append('URL_Type_obf_Type')
# reduced_features.append('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the top 30 features
phising_columns=[]
if phising_columns:
    dataset_final=dataset_final[phising_columns+['URL_Type_obf_Type']]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
pd.DataFrame.sort_index(train_set,axis=0,ascending=True,inplace=True)
pd.DataFrame.sort_index(test_set,axis=0,ascending=True,inplace=True)
#splitting further into train_x,train_y,test_x,test_y ----Multiclass classification-----
train_y=train_set['URL_Type_obf_Type'] #train labels for multiclass classification
# train_y_binary=train_set['category'] #binary labels exist only if the 'category' column is created above
train_set.drop(['URL_Type_obf_Type'],axis=1,inplace=True)
train_x=train_set
test_y=test_set['URL_Type_obf_Type']
# test_y_binary=test_set['category'] #test labels for binary classification
test_set.drop(['URL_Type_obf_Type'],axis=1,inplace=True)
test_x=test_set
#Encoding the categorical variables
#for SVM classification
train_y_svm=train_y
test_y_svm=test_y
#for other types of classification
train_y=
|
pd.get_dummies(train_y)
|
pandas.get_dummies
|
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from MyAIGuide.utilities.dataFrameUtilities import (
subset_period,
insert_data_to_tracker_mean_steps,
adjust_var_and_place_in_data,
insert_rolling_mean_columns,
insert_relative_values_columns
)
def create_test_dataframe(start_date, num_periods):
"""This creates a dummy dataframe for testing. It has date index
starting at given start date with a number of periods.
Params:
start_date: initial date for the index
num_periods: number of index values
"""
i = pd.date_range(start_date, periods=num_periods, freq='1D')
sLength = len(i)
empty = pd.Series(np.zeros(sLength)).values
d = {
'col1': empty + 1,
'col2': empty + 3,
'tracker_mean_steps': empty
}
return pd.DataFrame(data=d, index=i)
def test_subset_period():
# create empty (full of 0s) test dataframe
test_data = create_test_dataframe('2020-07-01', 4)
# only 1 day
period1 = ('2020-07-01', '2020-07-01')
# usual period of more than 1 day
period2 = ('2020-07-01', '2020-07-02')
# wrong period with start_date > end_date
period3 = ('2020-07-01', '2020-06-30')
# generate expected dataframes
expected_data1 = create_test_dataframe('2020-07-01', 1)
expected_data2 = create_test_dataframe('2020-07-01', 2)
expected_data3 = create_test_dataframe('2020-07-01', 0)
# run the function with the test data
result1 = subset_period(test_data, period1[0], period1[1])
result2 = subset_period(test_data, period2[0], period2[1])
# attention, function does not raise warning when start_date > end_date
result3 = subset_period(test_data, period3[0], period3[1])
# compare results and expected dataframes
assert_frame_equal(result1, expected_data1)
assert_frame_equal(result2, expected_data2)
assert_frame_equal(result3, expected_data3)
def test_insert_data_to_tracker_mean_steps():
# create empty (full of 0s) test dataframe
test_data = create_test_dataframe('2020-07-01', 4)
# only 1 day
period1 = ('2020-07-01', '2020-07-01')
# usual period of more than 1 day
period2 = ('2020-07-01', '2020-07-02')
# wrong period with start_date > end_date
period3 = ('2020-07-01', '2020-06-30')
# generate expected dataframes
expected_data1 = create_test_dataframe('2020-07-01', 4)
expected_data1['tracker_mean_steps'] = [1.0, 0.0, 0.0, 0.0]
expected_data2 = create_test_dataframe('2020-07-01', 4)
expected_data2['tracker_mean_steps'] = [1.0, 1.0, 0.0, 0.0]
expected_data3 = create_test_dataframe('2020-07-01', 4)
# run the function with the test data
result1 = insert_data_to_tracker_mean_steps(period1, test_data, 'col1', 'tracker_mean_steps')
result2 = insert_data_to_tracker_mean_steps(period2, test_data, 'col1', 'tracker_mean_steps')
# attention, function does not raise warning when start_date > end_date
result3 = insert_data_to_tracker_mean_steps(period3, test_data, 'col1', 'tracker_mean_steps')
# compare results and expected dataframes
assert_frame_equal(result1, expected_data1)
assert_frame_equal(result2, expected_data2)
|
assert_frame_equal(result3, expected_data3)
|
pandas.testing.assert_frame_equal
|
from __future__ import print_function
import os
import stat
from errno import ENOENT, EIO
from fuse import Operations, FuseOSError
import threading
import time
import pandas as pd
from fuse import FUSE
def str_to_time(s):
t =
|
pd.to_datetime(s)
|
pandas.to_datetime
|
"""Tests for the sdv.constraints.tabular module."""
import pandas as pd
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
        - dummy transform and reverse_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
        It is expected to create a new Constraint instance, receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = set(table_data[columns].itertuples(index=False))
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
assert instance._combinations == expected_combinations
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
    def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
transformed_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(transformed_data)
# Run
out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
        It is expected to create a new Constraint instance, receiving ``low`` and ``high``,
names of the columns that contain the low and high value.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
def test___init___strict_true(self):
"""Test the ``GreaterThan.__init__`` method.
        It is expected to create a new Constraint instance, receiving ``low`` and ``high``,
names of the columns that contain the low and high value. It also receives ``strict``,
a bool that indicates the comparison of the values should be strict.
Input:
- low = 'a'
- high = 'b'
- strict = True
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
def test_fit(self):
"""Test the ``GreaterThan.fit`` method.
It is expected to return the dtype of the ``high`` column.
Input:
- Table data (pandas.DataFrame)
Output:
- dtype of the ``high`` column.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
expected = table_data['b'].dtype
assert instance._dtype == expected
def test_is_valid_true_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are valid
and the comparison is strict.
If the columns satisfy the constraint, result is a series of ``True`` values.
Input:
- Table data, where the values of the ``low`` column are lower
than the values of the ``high`` column (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are not valid
and the comparison is strict.
        If the columns do not satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data, where the values of the ``low`` column are higher or equal
than the values of the ``high`` column (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 1, 1],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_true_not_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are valid
and the comparison is not strict.
If the columns satisfy the constraint, result is a series of ``True`` values.
Input:
- Table data, where the values of the ``low`` column are lower or equal
than the values of the ``high`` column (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 3],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false_not_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are not valid
and the comparison is not strict.
        If the columns do not satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data, where the values of the ``low`` column are higher
than the values of the ``high`` column (pandas.DataFrame)
Output:
        - Series of ``False`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [0, 1, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``GreaterThan.transform`` method.
The ``GreaterThan.transform`` method is expected to:
- Transform the original table data.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed (pandas.DataFrame)
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [1.3862944, 1.3862944, 1.3862944]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform(self):
"""Test the ``GreaterThan.reverse_transform`` method.
The ``GreaterThan.reverse_transform`` method is expected to:
- Return the original table data.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Table data (pandas.DataFrame)
Side effects:
- Since ``reverse_transform`` uses the class variable ``_dtype``, the ``fit`` method
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance = GreaterThan(low='a', high='b', strict=True)
instance.fit(table_data)
# Run
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [55, 149, 405],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
|
pd.testing.assert_frame_equal(expected_out, out)
|
pandas.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, redirect, url_for
import numpy as np
import pandas as pd
import numpy as np
import lightgbm as lgb
import os
import pickle
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier as RandomForest
from sklearn.model_selection import GridSearchCV
app = Flask(__name__)
def pred(n) :
name = n.split(',')
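    # n is expected to be a comma-separated string; only fields 0 (west wrestler
    # name) and 2 (east wrestler name) are used to build the prediction rows below.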
    ## load the CSV
train = pd.read_csv("data/train.csv")
ARR1 = []
for i in range(1,13,2) :
for j in range(1,16,1) :
ARR1.append([1,name[0],name[2], 2019, i, j])
test1 = pd.DataFrame(ARR1, columns=['w_judge','w_name','e_name','year','month','day'])
    ### drop rows with missing values
train = train.dropna()
test1 = test1.dropna()
train = train.drop(columns=['e_judge'])
train = train.drop(columns=['ruler'])
train = train.drop(columns=['w_rank'])
train = train.drop(columns=['e_rank'])
    # concatenate the training data with the rows to predict
train1 = pd.concat([train,test1], ignore_index=True)
    ### Category Encoder
for column in ['w_judge']:
le = LabelEncoder()
le.fit(train1[column])
train1[column] = le.transform(train1[column])
le.fit(test1[column])
test1[column] = le.transform(test1[column])
    ### One-Hot Encoding
oh_w_class =
|
pd.get_dummies(train1.w_name)
|
pandas.get_dummies
|
import pprint
import numpy as np
import pandas as pd
from features import mfcc_features, logfbank_features, zcr_features, ssc_features
from utils import get_fname_label_pairs
def generate_feature_mat(folder="a", train=True):
"""
Generate feature matrix
"""
training_df = get_fname_label_pairs(folder=folder, train=train)
features_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.api import Int64Index
class TestDataFrameTruncate:
def test_truncate(self, datetime_frame, frame_or_series):
ts = datetime_frame[::3]
if frame_or_series is Series:
ts = ts.iloc[:, 0]
start, end = datetime_frame.index[3], datetime_frame.index[6]
start_missing = datetime_frame.index[2]
end_missing = datetime_frame.index[7]
# neither specified
truncated = ts.truncate()
tm.assert_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
tm.assert_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
tm.assert_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
|
tm.assert_equal(truncated, expected)
|
pandas._testing.assert_equal
|
# -*- coding: utf-8 -*-
import requests as req
import pandas as pd
#import json
import csv
import os
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from common import globals as glob
from common import utils
from . import wb_check_quality as cq
def get_WDI_CSV_FILE_NAME(year):
csv_file_name = glob.WDI_CSV_FILE
csv_file_name = csv_file_name.replace('__YEAR__', year)
return csv_file_name
def get_wdi_name_list():
#first read the indicators which need to be retrieved. These have been selected offline
#and stored in a file called WDI_Series.csv. Refer to
#https://datahelpdesk.worldbank.org/knowledgebase/topics/125589-developer-information.
#The API end point for all the World Development Indicators (WDI) is
#http://api.worldbank.org/indicators?format=json.
count = 0
#create a list of WDI indicators and add each indicator parsed from the file to this list
wdi_names = []
with open(glob.WDI_FILE_NAME) as csvfile:
wdi_list = csv.reader(csvfile)
for row in wdi_list:
wdi_names.append(row[0])
count += 1
glob.log.info('total number of indicators %d' %(count))
return wdi_names
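# Illustrative note: get_wdi_name_list() returns a plain list of WDI indicator
# codes read from WDI_Series.csv, e.g. ['IC.REG.DURS', ...] (the code shown in
# the example URL below).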
def get_wdi_data(wdi, wdi_data, year):
#example URL http://api.worldbank.org/countries/ALL/indicators/IC.REG.DURS?date=2015&format=json&per_page=10000
#the WB_API_ENDPOINT has a token called __YEAR__ which needs to be replaced with the exact year
api = glob.WB_API_ENDPOINT + wdi + glob.WB_API_SUFFIX
api = api.replace('__YEAR__', year)
r = req.get(api)
    #if the return code is not 200 OK then it's an error
if r.ok != True:
glob.log.error('Error while retrieving information about WDI %s, server sent status code %d' %(wdi, r.status_code))
glob.log.error('Here is everything that was sent by the server...')
glob.log.error(r.text)
else:
#looks like we got the response
glob.log.info('successfully received a response from the WB API endpoint ' + api)
#parse out the response. The response is a json array by country
#we want to get the {country, value} tuple and store it in the input dict
        # the format is such that data of interest starts from the second element in the json
# see response to example URL mentioned above
resp = r.json()[1]
num_elems = len(resp)
for i in range(num_elems):
elem = resp[i]
id = elem['country']['id']
if id not in wdi_data.keys():
wdi_data[id] = {}
wdi_data[id]['name'] = elem['country']['value']
#check if the value is valid or null, if null then put a np.Nan
if
|
pd.notnull(elem['value'])
|
pandas.notnull
|
import pandas as pd
x = pd.Series([1, 3, 5, 7, 9])
# print(pd.__version__)
# print()
# print(x)
mp = {"Bir": 1, "İki": 2, "Üç": 3, "Dört": 4}
y =
|
pd.Series(mp)
|
pandas.Series
|
"""This module contains tests for recoding"""
from unittest import TestCase
import datetime
import pandas as pd
from kernel.recoding import recode, recode_dates, recode_ordinal, recode_nominal, recode_range
from kernel.util import reduce_string
class TestStringGeneralization(TestCase):
"""Class containing tests for string generalization"""
def test_generalization(self):
postcode = 'NE9 5YE'
generalized = reduce_string(postcode)
self.assertNotEqual(postcode, generalized)
def test_single_step_generalization(self):
postcode_1 = 'HP2 7PW'
postcode_2 = 'HP2 7PF'
generalized_1 = reduce_string(postcode_1)
generalized_2 = reduce_string(postcode_2)
self.assertNotEqual(postcode_1, postcode_2)
self.assertEqual(generalized_1, generalized_2)
def test_multistep_generalization(self):
postcode_1 = 'HP2 7PW'
postcode_2 = 'HP2 4DY'
number_of_generalization_steps = 0
while(postcode_1 != postcode_2):
if (len(postcode_1) > len(postcode_2)):
postcode_1 = reduce_string(postcode_1)
else:
postcode_2 = reduce_string(postcode_2)
number_of_generalization_steps = number_of_generalization_steps + 1
self.assertEqual(postcode_1, postcode_2)
self.assertEqual(number_of_generalization_steps, 6)
def test_total_generalization(self):
postcode_1 = 'HP2 7PW'
postcode_2 = 'CF470JD'
number_of_generalization_steps = 0
while(postcode_1 != postcode_2):
if (len(postcode_1) > len(postcode_2)):
postcode_1 = reduce_string(postcode_1)
else:
postcode_2 = reduce_string(postcode_2)
number_of_generalization_steps = number_of_generalization_steps + 1
self.assertEqual(postcode_1, postcode_2)
self.assertEqual(number_of_generalization_steps, 14)
self.assertEqual(postcode_1, '*')
class TestRangeGeneralization(TestCase):
"""Class containing tests for range generalization"""
def test_range_of_ints_generalization(self):
numbers = [2, 5, 27, 12, 3]
generalized = recode_range(pd.Series(numbers))
self.assertIsInstance(generalized, range)
self.assertEqual(generalized, range(2, 28))
def test_range_of_floats_generalization(self):
numbers = [8.7, 4.12, 27.3, 18]
generalized = recode_range(pd.Series(numbers))
self.assertIsInstance(generalized, range)
self.assertEqual(generalized, range(4, 29))
class TestDateGeneralization(TestCase):
"""Class containing tests for date generalization"""
def test_time_generalization(self):
date_1 = datetime.datetime(2020, 9, 28, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized, datetime.datetime(2020, 9, 28))
def test_day_generalization(self):
date_1 = datetime.datetime(2020, 9, 27, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized.to_timestamp(), datetime.datetime(2020, 9, 1))
def test_month_generalization(self):
date_1 = datetime.datetime(2020, 10, 27, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized.to_timestamp(), datetime.datetime(2020, 1, 1))
def test_year_generalization(self):
date_1 = datetime.datetime(2021, 10, 27, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized, range(2020, 2022))
class TestOrdinalGeneralization(TestCase):
"""Class containing tests for ordinal generalization"""
def test_ordinal_generalization_raises_exception(self):
categories = ['A', 'B', 'C']
values = ['A', 'A', 'A']
series = pd.Series(pd.Categorical(values, categories, ordered=False))
self.assertRaises(Exception, recode_ordinal, series)
def test_ordinal_generalization_with_single_category(self):
categories = ['A', 'B', 'C']
values = ['A', 'A', 'A']
series = pd.Series(pd.Categorical(values, categories, ordered=True))
generalized = recode_ordinal(series)
self.assertEqual(generalized, 'A')
def test_ordinal_generalization_with_multiple_categories(self):
categories = set(['A', 'B', 'C'])
values = ['B', 'A', 'B', 'C', 'A']
series = pd.Series(
|
pd.Categorical(values, categories, ordered=True)
|
pandas.Categorical
|
#%%
import pandas as pd
import prot.stats
from tqdm import tqdm
# Load the dataset (s)
condition_data = pd.read_csv('../../data/schmidt2016_longform.csv')
genes =
|
pd.read_csv('../../data/schmidt2016_genes_processes.csv')
|
pandas.read_csv
|
import base64
import datetime
import io
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from xlrd.xldate import xldate_as_datetime
from yattag import Doc
plt.rcParams.update({"figure.autolayout": True})
import matplotlib.gridspec as gridspec
import pandas as pd
import scipy.stats
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import logging
"""
TF_CPP_MIN_LOG_LEVEL:
Defaults to 0, so all logs are shown. Set TF_CPP_MIN_LOG_LEVEL to 1 to filter out INFO logs, 2 to additionally filter out WARNING, 3 to additionally filter out ERROR.
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
from tensorflow import keras
class NNetwork(object):
def __init__(self, network_count=200, epochs=1000):
logging.getLogger().setLevel(logging.INFO)
self.xl_dateformat = r"%Y-%m-%dT%H:%M"
self.model = None
self.pretrained_networks = []
self.software_version = "2.0.1"
self.input_filename = None
self.today = str(datetime.date.today())
self.avg_time_elapsed = 0
self.predictors_scaler = MinMaxScaler(feature_range=(-1, 1))
self.targets_scaler = MinMaxScaler(feature_range=(-1, 1))
self.history = None
self.file = None
self.skipped_rows = []
self.ruleset = []
self.layer1_neurons = 12
self.network_count = network_count
self.epochs = epochs
self.predictors = None
self.targets = None
self.predictions = None
self.avg_case_results_am = None
self.avg_case_results_pm = None
self.worst_case_results_am = None
self.worst_case_results_pm = None
self.WB_bandwidth = None
self.post_process_check = False # Is post-processed better than raw. If False, uses raw results, if true, uses post-processed results
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(self.layer1_neurons, input_dim=5, activation="tanh")
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer, metrics=["mse"])
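    # Typical usage (illustrative sketch, not part of the original project):
    #   nn = NNetwork(network_count=200, epochs=1000)
    #   nn.import_data_from_csv("input_data.csv")
    # "input_data.csv" is the example filename from the docstring below.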
def import_data_from_csv(self, filename):
"""
        Imports data into the network from a comma-separated values (CSV) file.
        Data loaded with this method can be used both for training and for
        making predictions.
:param filename: String containing the filename of the .csv file containing the input data (e.g "input_data.csv")
"""
df = pd.read_csv(filename)
self.file = df.copy()
global FRC_IN
global FRC_OUT
global WATTEMP
global COND
# Locate the fields used as inputs/predictors and outputs in the loaded file
# and split them
if "se1_frc" in self.file.columns:
FRC_IN = "se1_frc"
WATTEMP = "se1_wattemp"
COND = "se1_cond"
FRC_OUT = "se4_frc"
elif "ts_frc1" in self.file.columns:
FRC_IN = "ts_frc1"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc1"
elif "ts_frc" in self.file.columns:
FRC_IN = "ts_frc"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc"
# Standardize the DataFrame by specifying rules
# To add a new rule, call the method execute_rule with the parameters (description, affected_column, query)
self.execute_rule("Invalid tapstand FRC", FRC_IN, self.file[FRC_IN].isnull())
self.execute_rule("Invalid household FRC", FRC_OUT, self.file[FRC_OUT].isnull())
self.execute_rule(
"Invalid tapstand date/time",
"ts_datetime",
self.valid_dates(self.file["ts_datetime"]),
)
self.execute_rule(
"Invalid household date/time",
"hh_datetime",
self.valid_dates(self.file["hh_datetime"]),
)
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True) # fix dropped indices in pandas
# Locate the rows of the missing data
drop_threshold = 0.90 * len(self.file.loc[:, [FRC_IN]])
nan_rows_watt = self.file.loc[self.file[WATTEMP].isnull()]
if len(nan_rows_watt) < drop_threshold:
self.execute_rule(
"Missing Water Temperature Measurement",
WATTEMP,
self.file[WATTEMP].isnull(),
)
nan_rows_cond = self.file.loc[self.file[COND].isnull()]
if len(nan_rows_cond) < drop_threshold:
self.execute_rule("Missing EC Measurement", COND, self.file[COND].isnull())
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True)
start_date = self.file["ts_datetime"]
end_date = self.file["hh_datetime"]
durations = []
all_dates = []
collection_time = []
for i in range(len(start_date)):
try:
# excel type
start = float(start_date[i])
end = float(end_date[i])
start = xldate_as_datetime(start, datemode=0)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = xldate_as_datetime(end, datemode=0)
except ValueError:
# kobo type
start = start_date[i][:16].replace("/", "-")
end = end_date[i][:16].replace("/", "-")
start = datetime.datetime.strptime(start, self.xl_dateformat)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = datetime.datetime.strptime(end, self.xl_dateformat)
durations.append((end - start).total_seconds())
all_dates.append(datetime.datetime.strftime(start, self.xl_dateformat))
self.durations = durations
self.time_of_collection = collection_time
self.avg_time_elapsed = np.mean(durations)
# Extract the column of dates for all data and put them in YYYY-MM-DD format
self.file["formatted_date"] = all_dates
predictors = {
FRC_IN: self.file[FRC_IN],
"elapsed time": (np.array(self.durations) / 3600),
"time of collection (0=AM, 1=PM)": self.time_of_collection,
}
self.targets = self.file.loc[:, FRC_OUT]
self.var_names = [
"Tapstand FRC (mg/L)",
"Elapsed Time",
"time of collection (0=AM, 1=PM)",
]
self.predictors =
|
pd.DataFrame(predictors)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/2 11:22
Desc: Sina Finance - Bonds - SH/SZ convertible bonds - real-time and historical quote data
http://vip.stock.finance.sina.com.cn/mkt/#hskzz_z
"""
import datetime
import json
import re
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.bond.cons import (
zh_sina_bond_hs_cov_count_url,
zh_sina_bond_hs_cov_payload,
zh_sina_bond_hs_cov_url,
zh_sina_bond_hs_cov_hist_url,
)
from akshare.stock.cons import hk_js_decode
from akshare.utils import demjson
def _get_zh_bond_hs_cov_page_count() -> int:
"""
    Total page count of SH/SZ convertible bonds on Sina Finance - Market Center - Bonds
    http://vip.stock.finance.sina.com.cn/mkt/#hskzz_z
    :return: total page count
:rtype: int
"""
params = {
"node": "hskzz_z",
}
r = requests.get(zh_sina_bond_hs_cov_count_url, params=params)
    page_count = int(re.findall(re.compile(r"\d+"), r.text)[0]) / 80
    if page_count == int(page_count):
        return int(page_count)
    else:
        return int(page_count) + 1
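# Illustrative note on the pagination above: the listing endpoint reports 80
# bonds per page, so e.g. a total of 401 bonds gives 401 / 80 = 5.01, i.e. 6
# pages once the final partial page is included.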
def bond_zh_hs_cov_spot() -> pd.DataFrame:
"""
    Real-time quotes for SH/SZ convertible bonds from Sina Finance; heavy scraping can get your IP banned
    http://vip.stock.finance.sina.com.cn/mkt/#hskzz_z
    :return: real-time quotes for all SH/SZ convertible bonds at the current moment
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = _get_zh_bond_hs_cov_page_count()
zh_sina_bond_hs_payload_copy = zh_sina_bond_hs_cov_payload.copy()
for page in tqdm(range(1, page_count + 1), leave=False):
zh_sina_bond_hs_payload_copy.update({"page": page})
res = requests.get(zh_sina_bond_hs_cov_url, params=zh_sina_bond_hs_payload_copy)
data_json = demjson.decode(res.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
return big_df
def bond_zh_hs_cov_daily(symbol: str = "sz123111") -> pd.DataFrame:
"""
    Historical daily quotes for SH/SZ convertible bonds from Sina Finance; heavy scraping can get your IP banned
    http://vip.stock.finance.sina.com.cn/mkt/#hskzz_z
    :param symbol: convertible bond code; e.g., sh010107
    :type symbol: str
    :return: daily K-line data for the given convertible bond code
:rtype: pandas.DataFrame
"""
r = requests.get(
zh_sina_bond_hs_cov_hist_url.format(
symbol, datetime.datetime.now().strftime("%Y_%m_%d")
)
)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", r.text.split("=")[1].split(";")[0].replace('"', "")
    )  # run the JS decoding code
data_df = pd.DataFrame(dict_list)
data_df['date'] = pd.to_datetime(data_df["date"]).dt.date
return data_df
def _code_id_map() -> dict:
"""
    East Money - stock codes and their market IDs
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: mapping of stock codes to market IDs
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
return code_id_dict
def bond_zh_hs_cov_min(
symbol: str = "sh113570",
period: str = '15',
adjust: str = '',
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
) -> pd.DataFrame:
"""
    East Money - convertible bonds - intraday quotes
    https://quote.eastmoney.com/concept/sz128039.html
    :param symbol: convertible bond code
    :type symbol: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: intraday quotes
:rtype: pandas.DataFrame
"""
market_type = {'sh': '1', 'sz': '0'}
if period == '1':
url = 'https://push2.eastmoney.com/api/qt/stock/trends2/get'
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "5",
"iscr": "0",
'iscca': '0',
"secid": f"{market_type[symbol[:2]]}.{symbol[2:]}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["trends"]])
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
        temp_df['时间'] = pd.to_datetime(temp_df['时间']).astype(str)  # keep the full date and time
return temp_df
else:
adjust_map = {
'': '0',
'qfq': '1',
'hfq': '2',
}
url = 'https://push2his.eastmoney.com/api/qt/stock/kline/get'
params = {
'fields1': 'f1,f2,f3,f4,f5,f6',
'fields2': 'f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61',
'ut': '7eea3edcaed734bea9cbfc24409ed989',
'klt': period,
'fqt': adjust_map[adjust],
'secid': f"{market_type[symbol[:2]]}.{symbol[2:]}",
'beg': '0',
'end': '20500000',
'_': '1630930917857',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_
|
numeric(temp_df["涨跌额"])
|
pandas.to_numeric
|
import warnings
from datetime import datetime, timedelta
import pandas as pd
import psycopg2
class MarketDataCleaner(object):
"""Get data from main_market table and preprocess it into pandas.Dataframe"""
def __init__(self):
# DB connection and cursor instances.
self.conn = psycopg2.connect()
def clean(self):
# Load all rows from the main_price.
market_df = self._get_df()
# Convert all the datetimes to UTC time zone.
market_df['date'] = pd.to_datetime(market_df['date'], utc=True)
# Add day and hour columns for better work with date.
market_df['daycol'] = market_df['date'].dt.date
market_df['hourcol'] = market_df['date'].dt.hour
# Remove data points which share the same date&hour.
print('Start removing data points with same date and hour')
ids_to_drop = []
grouped_by_dayhour = market_df.groupby(['daycol', 'hourcol'])
for _, df in grouped_by_dayhour:
if df.shape[0] != 1:
for value in df.index.values[1:]:
ids_to_drop.append(value)
market_df = market_df.drop(ids_to_drop)
# Check if there are Null values.
        print('There are {0} NA values in main_market'.format(
            market_df.isnull().sum().sum()))
# Compare with real hourly data points - fill missing values.
cur_date = datetime.now()
finish_date = datetime(2016, 1, 1)
hour_timedelta = timedelta(hours=1)
while cur_date > finish_date:
filter_day = market_df['daycol'] == cur_date.date()
filter_hour = market_df['hourcol'] == cur_date.hour
if market_df[filter_day & filter_hour].empty:
print(
'Found empty value from market_data at {0}'.format(cur_date))
df_to_add_data = {
'date': [cur_date],
'globalmarketcap': [market_df[filter_day].mean()['globalmarketcap']],
'mchoursentiment': [market_df[filter_day].mean()['mchoursentiment']],
'mchourprediction': [market_df[filter_day].mean()['mchourprediction']],
'mchourtrend': [market_df[filter_day].mean()['mchourtrend']],
'globalvolume': [market_df[filter_day].mean()['globalvolume']],
'daycol': [cur_date.date()],
'hourcol': [cur_date.hour]
}
df_to_add = pd.DataFrame(df_to_add_data)
                market_df = market_df.append(df_to_add, ignore_index=True)
cur_date -= hour_timedelta
# Return cleaned data.
return market_df
def _get_df(self):
select_query = """select * from main_market;"""
data_df =
|
pd.read_sql_query(select_query, self.conn, index_col='id')
|
pandas.read_sql_query
|
import argparse
import glob
import pandas as pd
import os
from deeppipeline.kvs import GlobalKVS
import torch.optim.lr_scheduler as lr_scheduler
def gen_image_id(fname, sample_id):
prj, slice_num = fname.split('/')[-1].split('.')[0].split('_')
return f'{sample_id}_{slice_num}_{prj}'
def init_metadata():
kvs = GlobalKVS()
imgs = glob.glob(os.path.join(kvs['args'].dataset, '*', 'imgs', '*.png'))
imgs.sort(key=lambda x: x.split('/')[-1])
masks = glob.glob(os.path.join(kvs['args'].dataset, '*', 'masks', '*.png'))
masks.sort(key=lambda x: x.split('/')[-1])
sample_id = list(map(lambda x: x.split('/')[-3], imgs))
subject_id = list(map(lambda x: x.split('/')[-3].split('_')[0], imgs))
metadata = pd.DataFrame(data={'img_fname': imgs, 'mask_fname': masks,
'sample_id': sample_id, 'subject_id': subject_id})
metadata['sample_subject_proj'] = metadata.apply(lambda x: gen_image_id(x.img_fname, x.sample_id), 1)
grades =
|
pd.read_csv(kvs['args'].grades)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import xarray as xr
import copy
import warnings
try:
from plotly import graph_objs as go
plotly_installed = True
except:
plotly_installed = False
# warnings.warn("PLOTLY not installed so interactive plots are not available. This may result in unexpected funtionality")
global_3d_mapper = np.repeat(0, 256 * 4).reshape(256, -1)
global_3d_mapper[ord('T'), :] = np.array([0, 0, 0, 1])
global_3d_mapper[ord('C'), :] = np.array([0, 1, 0, 0])
global_3d_mapper[ord('A'), :] = np.array([1, 0, 0, 0])
global_3d_mapper[ord('G'), :] = np.array([0, 0, 1, 0])
def compare_sequence_matrices(seq_arr1, seq_arr2, flip=False, treat_as_match=[], ignore_characters=[], return_num_bases=False):
"""
This will "align" seq_arr1 to seq_arr2. It will calculate which positions in each sequence defined by seq_arr1 matches each position in each sequence defined by seq_arr2
seq_arr1 = NxP matrix where N = # of sequences represented in seq_arr1 and P represents each base pair position/the length of the string
    seq_arr2 = MxP matrix where M = # of sequences represented in seq_arr2 and P represents each base pair position/the length of the string
    This operation will return a NxPxM boolean matrix where each position represents whether the base pair in sequence N and the base pair in sequence M represented at position P match
    In other words, if bool_arr = compare_sequence_matrices(A, B) then the total hamming distance between the second and third sequence in matrices A and B respectively can be found as
    >>> bool_arr.sum(axis=1)[1][2]
    Args:
        seq_arr1 (np.array): NxP matrix of sequences represented as array of numbers
        seq_arr2 (np.array): MxP matrix of sequences represented as array of numbers
        flip (bool): If False then "true" means that letters are equal at the specified position; if True then return positions that are NOT equal to one another
        treat_as_match (list of chars): Treat any positions that have any of these letters in either matrices as True
        ignore_characters (list of chars): Ignore positions that have any of these letters in either matrices
    .. warning:: datatype
        When ignore_characters is defined, the array is passed back as a float dtype because it must accommodate np.nan
        return_num_bases (False): If true then it will return a second parameter that defines the number of non-nan values between alignments
Returns: NxPxM array of boolean values
"""
assert seq_arr1.shape[1] == seq_arr2.shape[1], 'Matrices do not match!'
    # use np.uint8 because it ends up being faster
seq_arr1 = seq_arr1.view(np.uint8)
seq_arr2 = seq_arr2.view(np.uint8)
# this will return true of pos X in seqA and seqB are equal
diff_arr = (seq_arr1[..., np.newaxis].view(np.uint8) == seq_arr2.T[np.newaxis, ...])
# print(diff_arr.shape)
if treat_as_match:
        # treat any of these letters at any position as true regardless of whether they match in the respective pairwise sequences
if not isinstance(treat_as_match, list):
treat_as_match = [treat_as_match]
treat_as_match = [ord(let) for let in treat_as_match]
# now we have to ignore characters that are equal to specific values
# return True for any positions that is equal to "treat_as_true"
ignore_pos = ((seq_arr1 == treat_as_match[0])[..., np.newaxis]) | ((seq_arr2 == treat_as_match[0])[..., np.newaxis].T)
for chr_p in treat_as_match[1:]:
ignore_pos = ignore_pos | ((seq_arr1 == chr_p)[..., np.newaxis]) | ((seq_arr2 == chr_p)[..., np.newaxis].T)
# now adjust boolean results to ignore any positions == treat_as_true
diff_arr = (diff_arr | ignore_pos) # if flip is False else (diffs | ignore_pos)
if flip is False:
diff_arr = diff_arr # (~(~diffarr))
else:
diff_arr = ~diff_arr # (~diffarr)
# print(diff_arr.shape)
if ignore_characters:
# do not treat these characters as true OR false
if not isinstance(ignore_characters, list):
ignore_characters = [ignore_characters]
ignore_characters = [ord(let) for let in ignore_characters]
# now we have to ignore characters that are equal to specific values
ignore_pos = (seq_arr1 == ignore_characters[0])[..., np.newaxis] | ((seq_arr2 == ignore_characters[0])[..., np.newaxis].T)
for chr_p in ignore_characters[1:]:
ignore_pos = ignore_pos | ((seq_arr1 == chr_p)[..., np.newaxis]) | ((seq_arr2 == chr_p)[..., np.newaxis]).T
        diff_arr = diff_arr.astype(float)
diff_arr[ignore_pos] = np.nan
diff_arr = diff_arr
if return_num_bases:
num_bases = np.apply_along_axis(
arr=diff_arr,
axis=1,
func1d=lambda x: len(x[~np.isnan(x)])
)
return diff_arr, num_bases
else:
return diff_arr
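# Illustrative sketch (added for clarity; not part of the original module). It assumes
# sequences are encoded as uint8 character codes, as the function above expects.
def _example_compare_sequence_matrices():
    seqs1 = np.array([list(b'ACGT'), list(b'AAAA')], dtype=np.uint8)  # 2 sequences, length 4
    seqs2 = np.array([list(b'ACGA'), list(b'TTTT')], dtype=np.uint8)  # 2 sequences, length 4
    match = compare_sequence_matrices(seqs1, seqs2)  # boolean array of shape (2, 4, 2)
    # pairwise mismatch (Hamming) counts between every sequence in seqs1 and seqs2
    return (~match).sum(axis=1)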
def numpy_value_counts_bin_count(arr, weights=None):
"""
Use the 'bin count' function in numpy to calculate the unique values in every column of a dataframe
clocked at about 3-4x faster than pandas_value_counts (df.apply(pd.value_counts))
Args:
arr (dataframe, or np array): Should represent rows as sequences and columns as positions. All values should be int
weights (np array): Should be a list of weights to place on each
"""
if not isinstance(arr, np.ndarray):
raise Exception('The provided parameter for arr is not a dataframe or numpy array')
if len(arr.shape) == 1:
# its a ONE D array, lets make it two D
arr = arr.reshape(-1, 1)
arr = arr.view(np.uint8)
    # returns an array of length equal to the max value in the array + 1; each element represents the number of times an integer appeared in the array.
bins = [
np.bincount(arr[:, x], weights=weights)
for x in range(arr.shape[1])
]
indices = [np.nonzero(x)[0] for x in bins] # only look at non zero bins
series = [pd.Series(y[x], index=x) for (x, y) in zip(indices, bins)]
return pd.concat(series, axis=1).fillna(0)
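# Illustrative sketch (added for clarity; not part of the original module): per-position
# letter counts for three equal-length sequences. Row labels are ord() character codes,
# columns are sequence positions.
def _example_numpy_value_counts():
    arr = np.array([list(b'AACG'), list(b'AACG'), list(b'TACG')], dtype=np.uint8)
    counts = numpy_value_counts_bin_count(arr)
    # e.g. counts.loc[ord('A'), 0] == 2 and counts.loc[ord('T'), 0] == 1 for this toy input
    return counts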
def get_quality_dist(
arr, col_names=None, bins='even', exclude_null_quality=True, sample=None,
percentiles=[10, 25, 50, 75, 90], stats=['mean', 'median', 'max', 'min'],
plotly_sampledata_size=20, use_multiindex=True,
):
"""
Returns the distribution of quality across the given sequence, similar to FASTQC quality seq report.
Args:
arr (np.array): a matrix of quality scores where rows represent a sequence and columns represent a position
col_names (list): column header for the numpy array (either from xarray or pandas)
        bins (list of ints or tuples, or 'fastqc', or 'even'): defines how to group together the columns/sequence positions when aggregating the statistics.
.. note:: bins='fastqc' or 'even'
if bins is not a set of numbers and instead one of the two predefined strings ('fastqc' and 'even') then calculation of bins will be defined as follows:
1. fastqc: Identical to the bin ranges used by fastqc report
2. even: Creates 10 evenly sized bins based on sequence lengths
percentiles (list of floats, default=[10, 25, 50, 75, 90]): value passed into numpy quantiles function.
exclude_null_quality (boolean, default=True): do not include quality scores of 0 in the distribution
sample (int, default=None): If defined, then we will only calculate the distribution on a random subsampled population of sequences
plotly_sampledata_size (int, default=20): Number of values to store in a sample numpy array used for creating box plots in plotly
.. note:: min size
note the minimum value for a sampledata size is 10
Returns:
        data (DataFrame): contains the distribution information at every bin (min value, max value, desired percentages and quartiles)
graphs (plotly object): contains plotly graph objects for generating plots of the data afterwards
Examples:
Show the median of the quality at the first ten positions in the sequence
>>> table = SeqTable(['AAAAAAAAAA', 'AAAAAAAAAC', 'CCCCCCCCCC'], qualitydata=['6AA9-C9--6C', '6AA!1C9BA6C', '6AA!!C9!-6C'])
>>> box_data, graphs = table.get_quality_dist(bins=range(10), percentiles=[50])
Now repeat the example from above, except group together all values from the first 5 bases and the next 5 bases
        i.e. All qualities between positions 0-4 will be grouped together before performing median, and all qualities between 5-9 will be grouped together). Also, return the bottom 10 and upper 90 percentiles in the statistics
>>> box_data, graphs = table.get_quality_dist(bins=[(0,4), (5,9)], percentiles=[10, 50, 90])
We can also plot the results as a series of boxplots using plotly
>>> from plotly.offline import init_notebook_mode, iplot, plot, iplot_mpl
# assuming ipython..
>>> init_notebook_mode()
>>> plotly.iplot(graphs)
# using outside of ipython
>>> plotly.plot(graphs)
"""
from collections import OrderedDict
current_stats = ['min', 'max', 'mean', 'median']
assert set(stats).issubset(set(current_stats)), "The stats provided are not currently supported. We only support {0}".format(','.join(current_stats))
# current base positions in dataframe
if col_names is None:
col_names = np.arange(1, arr.shape[1] + 1)
else:
assert len(col_names) == arr.shape[1], 'Column names does not match shape'
# print(bins)
    if bins == 'fastqc':
# use default bins as defined by fastqc report
bins = [
(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9),
(10, 14), (15, 19), (20, 24), (25, 29), (30, 34), (35, 39), (40, 44), (45, 49), (50, 54), (55, 59), (60, 64),
(65, 69), (70, 74), (80, 84), (85, 89), (90, 94), (95, 99),
(100, 104), (105, 109), (110, 114), (115, 119), (120, 124), (125, 129), (130, 134), (135, 139), (140, 144), (145, 149), (150, 154), (155, 159), (160, 164), (165, 169), (170, 174), (175, 179), (180, 184), (185, 189), (190, 194), (195, 199),
(200, 204), (205, 209), (210, 214), (215, 219), (220, 224), (225, 229), (230, 234), (235, 239), (240, 244), (245, 249), (250, 254), (255, 259), (260, 264), (265, 269), (270, 274), (275, 279), (280, 284), (285, 289), (290, 294), (295, 299),
] + [(p, p + 9) for p in np.arange(300, arr.shape[1], 10)]
bins = [x if isinstance(x, int) else (x[0], x[1]) for x in bins]
    elif bins == 'even':
# create an equal set of 10 bins based on df shape
binsize = int(arr.shape[1] / 10)
bins = []
for x in range(0, arr.shape[1], binsize):
c1 = col_names[x]
c2 = col_names[min(x + binsize - 1, arr.shape[1] - 1)]
bins.append((c1, c2))
# print(bins)
else:
# just in case its a generator (i.e. range function)
# convert floats to ints, otherwise keep original
bins = [(int(x), int(x)) if isinstance(x, float) else x if isinstance(x, tuple) else (x, x) for x in bins]
binnames = OrderedDict()
for b in bins:
if b[0] < min(col_names) or b[0] > max(col_names):
continue
# create names for each bin
if isinstance(b, int):
binnames[str(b)] = (b, b)
elif len(b) == 2:
binnames[str(b[0]) + '-' + str(b[1])] = (b[0], b[1])
temp = xr.DataArray(
arr[np.random.choice(arr.shape[0], sample), :] if sample else arr,
dims=('read', 'position'),
coords={'position': col_names}
)
# define the quantile percentages we will return for each quality bin
percentiles = [round(p, 0) for p in percentiles]
per = copy.copy(percentiles)
# ensure that the following percentiles will ALWAYS be present
program_required = [0, 10, 25, 50, 75, 90, 100]
to_add_manually = set(program_required) - set(per)
# update percentil list
per = sorted(per + list(to_add_manually))
# loop through each of the binnames/bin counts
binned_data = OrderedDict()
binned_data_stats = OrderedDict()
graphs = [] # for storing plotly graphs
plotlychosendata = pd.DataFrame(0, index=list(binnames.keys()), columns=['min', 'max', 'mean', 'median'])
for name, binned_cols in binnames.items():
userchosen_stats = {}
userchosen = {}
if isinstance(binned_cols, int):
# not binning together multiple positions in sequence
binned_cols = (binned_cols, binned_cols)
# create a list of all column/base positions listed within this bin
# set_cols = set(list(range(binned_cols[0], binned_cols[1] + 1)))
# identify columns in dataframe that intersect with columns listed above
# sel_cols = list(col_names_set & set_cols)
# select qualities within bin, unwind list into a single list
p = list(set(np.arange(binned_cols[0], binned_cols[1] + 1)) & set(temp.position.values)) # make sure positions are present in columns
bin_qual = temp.sel(position=p).values.ravel()
if exclude_null_quality:
quantile_res = np.percentile(bin_qual[bin_qual > 0], per)
mean_val = bin_qual[bin_qual > 0].mean()
plotlychosendata.loc[name, 'mean'] = mean_val
if 'mean' in stats:
userchosen_stats['mean'] = mean_val
else:
mean_val = bin_qual[bin_qual > 0].mean()
quantile_res = np.percentile(bin_qual, per)
plotlychosendata.loc[name, 'mean'] = mean_val
if 'mean' in stats:
userchosen_stats['mean'] = mean_val
storevals = []
for p, qnt in zip(per, quantile_res):
if p == 0:
plotlychosendata.loc[name, 'min'] = qnt
if 'min' in stats:
userchosen_stats['min'] = qnt
if p == 100:
plotlychosendata.loc[name, 'max'] = qnt
if 'max' in stats:
userchosen_stats['max'] = qnt
if p in program_required:
# store the values required by the program in storevals
storevals.append(qnt)
if p in percentiles:
# store original quantile values desired by user in variable percentiles
userchosen[str(int(p)) + '%'] = qnt
if p == 50:
# store median
median = qnt
if 'median' in stats:
userchosen_stats['median'] = qnt
plotlychosendata.loc[name, 'median'] = qnt
userchosen = pd.Series(userchosen)
if plotly_sampledata_size < 10:
warnings.warn('Warning, the desired plotly_sampledata_size is too low, value has been changed to 10')
plotly_sampledata_size = 10
        # next, a fake set of data that we can pass into plotly for making boxplots; the data's descriptive statistics will match the current set
        sample_data = np.zeros(plotly_sampledata_size)
        # these indices in subsets indicate the 5% index values for the provided sample_data_size
        subsets = [int(x) for x in np.arange(0, 1.00, 0.05) * plotly_sampledata_size]
# we hardcoded the values in program_required, so we can add those values into fake subsets
sample_data[0:subsets[1]] = storevals[1] # store min value in these indices
sample_data[subsets[1]:subsets[3]] = storevals[1] # store bottom 10% of data within 5-15% data range
sample_data[subsets[3]:subsets[7]] = storevals[2] # store 25% of data
sample_data[subsets[7]:subsets[13]] = storevals[3] # store median of data
sample_data[subsets[13]:subsets[17]] = storevals[4] # store 75% of data
sample_data[subsets[17]:subsets[19]] = storevals[5] # store max val
sample_data[subsets[19]:] = storevals[5] # store max val
color = 'red' if median < 20 else 'blue' if median < 30 else 'green'
if plotly_installed is True:
# create a box plot using the fake sample_data, again this is better for memory resources since plotly stores all datapoints in javascript
plotdata = go.Box(
y=sample_data,
pointpos=0,
name=name,
boxpoints=False,
fillcolor=color,
showlegend=False,
line={
'color': 'black',
'width': 0.7
},
marker=dict(
color='rgb(107, 174, 214)',
size=3
)
)
else:
warnings.warn('PLOTLY not installed. No graph object data was returned')
plotdata = None
graphs.append(plotdata)
binned_data[name] = userchosen
binned_data_stats[name] = userchosen_stats
if plotly_installed is True:
# also include a scatter plot for the minimum value, maximum value, and mean in distribution
scatter_min = go.Scatter(x=list(plotlychosendata.index), y=plotlychosendata['min'], mode='markers', name='min', showlegend=False)
scatter_max = go.Scatter(x=list(plotlychosendata.index), y=plotlychosendata['max'], mode='markers', name='max')
scatter_mean = go.Scatter(
x=list(plotlychosendata.index),
y=plotlychosendata['mean'], line=dict(shape='spline'),
name='mean'
)
graphs.extend([scatter_min, scatter_max, scatter_mean])
if use_multiindex is True:
stats_df = pd.concat([pd.DataFrame(binned_data), pd.DataFrame(binned_data_stats)], keys=['percentile', 'stats'])
else:
stats_df = pd.concat([
|
pd.DataFrame(binned_data)
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numba import njit
###############################################################################
#Non-Standard Imports
###############################################################################
import addpath
import dunlin as dn
import dunlin._utils_model.dun_file_reader as dfr
import dunlin._utils_model.ode_coder as odc
if __name__ == '__main__':
dun_data0 = dfr.read_file('dun_test_files/M20.dun')
dun_data1 = dfr.read_file('dun_test_files/M21.dun')
model_data0 = dun_data0['M1']
model_data1 = dun_data1['M2']
model_data2 = dun_data1['M3']
###############################################################################
#Part 1: Low Level Code Generation
###############################################################################
funcs = model_data0['funcs']
vrbs = model_data0['vrbs']
rxns = model_data0['rxns']
states = model_data0['states']
#Test func def
name, args = 'test_func', ['a', 'b']
code = odc.make_def(name, *args)
test_func = f'{code}\n\treturn [a, b]'
exec(test_func)
a, b = 1, 2
assert test_func(a, b) == [1, 2]
#Test code generation for local functions
code = odc.funcs2code(funcs)
test_func = f'def test_func(v, x, k):\n{code}\n\treturn MM(v, x, k)'
exec(test_func)
assert test_func(2, 4, 6) == 0.8
#Test local variable
code = odc.vrbs2code(vrbs)
test_func = f'def test_func(x2, k1):\n{code}\n\treturn sat2'
exec(test_func)
assert test_func(1, 1) == 0.5
#Parse single reaction
stripper = lambda *s: ''.join(s).replace(' ', '').strip()
r = odc._parse_rxn(*rxns['r0'])
assert {'x0': '-1', 'x1': '-2', 'x2': '+1'} == r[0]
assert stripper(rxns['r0'][1], '-', rxns['r0'][2]) == stripper(r[1])
r = odc._parse_rxn(*rxns['r1'])
assert {'x2': '-1', 'x3': '+1'} == r[0]
assert stripper(rxns['r1'][1]) == stripper(r[1])
r = odc._parse_rxn(*rxns['r2'])
assert {'x3': '-1'} == r[0]
assert stripper(rxns['r2'][1]) == stripper(r[1])
#Test code generation for multiple reactions
code = odc.rxns2code(model_data0)
MM = lambda v, x, k: 0
sat2 = 0.5
test_func = f'def test_func(x0, x1, x2, x3, x4, p0, p1, p2, p3, p4):\n{code}\treturn [d_x0, d_x1, d_x2, d_x3, d_x4]'
exec(test_func)
r = test_func(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
assert r == [-1.0, -2.0, 0.5, -0.5, 1]
#Test code generation for hierarchical models
#We need to create the "submodel"
MM = lambda v, x, k: 0
code = odc.rxns2code(model_data1)
test_func0 = 'def model_M2(*args): return np.array([1, 1])'
exec(test_func0)
code = odc.rxns2code(model_data2)
test_func = f'def test_func(t, x0, x1, x2, x3, p0, p1, p2, p3, k2):\n{code}\treturn [d_x0, d_x1, d_x2, d_x3]'
exec(test_func)
r = test_func(0, 1, 1, 1, 1, 1, 1, 1, 1, 1)
assert r == [-1, 2, 1, 1]
temp = dun_data1['M3']['rxns']['r1']
dun_data1['M3']['rxns']['r1'] = {'submodel': 'M2',
'substates': {'xx0': 'x1', 'xx1': 'x2'},
'subparams': {'pp0' : 'p0', 'pp1' : 'p1', 'kk1': 'k2'}
}
try:
code = odc.rxns2code(model_data2)
except NotImplementedError as e:
assert True
else:
assert False
dun_data1['M3']['rxns']['r1'] = temp
###############################################################################
#Part 2: High Level Code Generation
###############################################################################
template0 = odc.make_template(model_data0)
template1 = odc.make_template(model_data1)
template2 = odc.make_template(model_data2)
params = model_data0['params']
exvs = model_data0['exvs']
events = model_data0['events']
modify = model_data0['modify']
#Generate code for ode rhs
code = odc.rhs2code(template0, model_data0)[1]
test_func = code.replace('model_M1', 'test_func')
exec(test_func)
t = 0
y = np.ones(5)
p = pd.DataFrame(params).values[0]
dy = test_func(t, y, p)
assert all( dy == np.array([-0.5, -1, 0, -1.5 , 2]) )
#Generate code for sim
code = odc.sim2code(template0, model_data0)[1]
test_func = code.replace('sim_M1', 'test_func')
exec(test_func)
t = np.array([0, 1])
y = np.ones((5, 2))
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
answer = {'x0' : np.array([1., 1.]), 'x1' : np.array([1., 1.]),
'x2' : np.array([1., 1.]), 'x3' : np.array([1., 1.]),
'x4' : np.array([1., 1.]), 'sat2': np.array([0.5, 0.5]),
'd_x0': np.array([-0.5, -0.5]), 'd_x1': np.array([-1., -1.]),
'd_x2': np.array([0., 0.]), 'd_x3': np.array([-1.5, -1.5]),
'd_x4': np.array([2., 2.]), 't' : np.array([0, 1])
}
for k, v in answer.items():
assert np.all(v == r[k])
#Generate code for exv
codes = odc.exvs2code(template0, model_data0)
test_func = codes['r0'][1].replace('exv_M1_r0', 'test_func')
exec(test_func)
t = np.array([0, 1])
y = np.ones((5, 2))
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
assert all(r == 0.5)
#Generate code for single event trigger
trigger = events['e0'][0]
code = odc.trigger2code('e0', trigger, template0, model_data0)[1]
test_func = code.replace('trigger_M1_e0', 'test_func')
exec(test_func)
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
assert r == 0.5
#Generate code for single event assignment
assignment = events['e0'][1]
code = odc.assignment2code('e0', assignment, template0, model_data0)[1]
test_func = code.replace('assignment_M1_e0', 'test_func')
exec(test_func)
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
assert r[0][0] == 5
assert r[1][0] == 0.5
#Generate code for single event
codes = odc.event2code('e0', template0, model_data0)
test_func = codes['trigger'][1].replace('trigger_M1_e0', 'test_func')
exec(test_func)
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
assert r == 0.5
test_func = codes['assignment'][1].replace('assignment_M1_e0', 'test_func')
exec(test_func)
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
assert r[0][0] == 5
assert r[1][0] == 0.5
#Generate code for all events
codes = odc.events2code(template0, model_data0)
test_func = codes['e0']['trigger'][1].replace('trigger_M1_e0', 'test_func')
exec(test_func)
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
assert r == 0.5
test_func = codes['e0']['assignment'][1].replace('assignment_M1_e0', 'test_func')
exec(test_func)
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = test_func(t, y, p)
assert r[0][0] == 5
assert r[1][0] == 0.5
#Generate modify
code = odc.modify2code(template0, model_data0)[1]
test_func = code.replace('modify_M1', 'test_func')
exec(test_func)
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = test_func(y, p, scenario=1)
assert all( r[0] == np.array([10, 1, 1, 1, 1]) )
assert all( r[1] == p)
###############################################################################
#Part 3A: Function Generation
###############################################################################
#Generate single function from code
code = 'x = lambda t: t+1'
scope = {}
test_func = odc.code2func(['x', code])
assert test_func(5) == 6
#Generate multiple functions from codes
#The second function requires access to the first one
codes = {'fx': ['x', 'def x(t):\n\treturn t+1'],
'fy': ['y', 'def y(t):\n\treturn x(t)+2']
}
r = odc.code2func(codes)
test_func = r['fx']
assert test_func(5) == 6
test_func = r['fy']
assert test_func(5) == 8
###############################################################################
#Part 3B: Function Generation
###############################################################################
template0 = odc.make_template(model_data0)
template1 = odc.make_template(model_data1)
template2 = odc.make_template(model_data2)
params = model_data0['params']
exvs = model_data0['exvs']
events = model_data0['events']
modify = model_data0['modify']
#Generate rhs function
func = odc.rhs2func(template0, model_data0)
t = 0
y = np.ones(5)
p = pd.DataFrame(params).values[0]
dy = func(t, y, p)
assert all( dy == np.array([-0.5, -1, 0, -1.5 , 2]) )
#Generate exv functions
funcs = odc.exvs2func(template0, model_data0)
func = funcs['r0']
t = np.array([0, 1])
y = np.ones((5, 2))
p = pd.DataFrame(params).values[0]
r = func(t, y, p)
assert all(r == 0.5)
#Generate event functions for one event
funcs = odc.event2func('e0', template0, model_data0)
func = funcs['trigger']
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = func(t, y, p)
assert r == 0.5
func = funcs['assignment']
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = func(t, y, p)
assert r[0][0] == 5
assert r[1][0] == 0.5
#Generate event functions for all events
funcs = odc.events2func(template0, model_data0)
func = funcs['e0']['trigger']
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = func(t, y, p)
assert r == 0.5
func = funcs['e0']['assignment']
t = 10
y = np.array([0, 1, 1, 1, 1])
p = pd.DataFrame(params).values[0]
r = func(t, y, p)
assert r[0][0] == 5
assert r[1][0] == 0.5
#Generate modify
func = odc.modify2func(template0, model_data0)
t = 10
y = np.array([0, 1, 1, 1, 1])
p =
|
pd.DataFrame(params)
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
PeriodIndex,
Series,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float
class TestFillNA:
def test_fillna_datetime(self, datetime_frame):
tf = datetime_frame
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = datetime_frame.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all()
padded = datetime_frame.fillna(method="pad")
assert np.isnan(padded.loc[padded.index[:5], "A"]).all()
assert (
padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"]
).all()
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
datetime_frame.fillna()
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_frame.fillna(5, method="ffill")
def test_fillna_mixed_type(self, float_string_frame):
mf = float_string_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
# TODO: make stronger assertion here, GH 25640
mf.fillna(value=0)
mf.fillna(method="pad")
def test_fillna_mixed_float(self, mixed_float_frame):
# mixed numeric (but no float16)
mf = mixed_float_frame.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype={"C": None})
result = mf.fillna(method="pad")
_check_mixed_float(result, dtype={"C": None})
def test_fillna_empty(self):
# empty frame (GH#2778)
df = DataFrame(columns=["x"])
for m in ["pad", "backfill"]:
df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)
def test_fillna_different_dtype(self):
# with different dtype (GH#3386)
df = DataFrame(
[["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]]
)
result = df.fillna({2: "foo"})
expected = DataFrame(
[["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]]
)
tm.assert_frame_equal(result, expected)
return_value = df.fillna({2: "foo"}, inplace=True)
tm.assert_frame_equal(df, expected)
assert return_value is None
def test_fillna_limit_and_value(self):
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
tm.assert_frame_equal(result, expected)
def test_fillna_datelike(self):
# with datelike
# GH#6344
df = DataFrame(
{
"Date": [NaT, Timestamp("2014-1-1")],
"Date2": [Timestamp("2013-1-1"), NaT],
}
)
expected = df.copy()
expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"])
result = df.fillna(value={"Date": df["Date2"]})
tm.assert_frame_equal(result, expected)
def test_fillna_tzaware(self):
# with timezone
# GH#15855
df = DataFrame({"A": [Timestamp("2012-11-11 00:00:00+01:00"), NaT]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
tm.assert_frame_equal(df.fillna(method="pad"), exp)
df = DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
tm.assert_frame_equal(df.fillna(method="bfill"), exp)
def test_fillna_tzaware_different_column(self):
# with timezone in another column
# GH#15522
df = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1, 2, np.nan, np.nan],
}
)
result = df.fillna(method="pad")
expected = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1.0, 2.0, 2.0, 2.0],
}
)
tm.assert_frame_equal(result, expected)
def test_na_actions_categorical(self):
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = DataFrame({"cats": cat2, "vals": vals2})
cat3 = Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3})
cat4 = Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(ValueError, match=msg):
df.fillna(value={"cats": 4, "vals": "c"})
res = df.fillna(method="pad")
tm.assert_frame_equal(res, df_exp_fill)
# dropna
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes missing values into account
c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
df = DataFrame({"cats": c, "vals": [1, 2, 3]})
cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"])
df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
def test_fillna_categorical_nan(self):
# GH#14021
# np.nan should always be a valid filler
cat = Categorical([np.nan, 2, np.nan])
val = Categorical([np.nan, np.nan, np.nan])
df = DataFrame({"cats": cat, "vals": val})
# GH#32950 df.median() is poorly behaved because there is no
# Categorical.median
median = Series({"cats": 2.0, "vals": np.nan})
res = df.fillna(median)
v_exp = [np.nan, np.nan, np.nan]
df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp}, dtype="category")
tm.assert_frame_equal(res, df_exp)
result = df.cats.fillna(np.nan)
tm.assert_series_equal(result, df.cats)
result = df.vals.fillna(np.nan)
tm.assert_series_equal(result, df.vals)
idx = DatetimeIndex(
["2011-01-01 09:00", "2016-01-01 23:45", "2011-01-01 09:00", NaT, NaT]
)
df = DataFrame({"a": Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=NaT), df)
idx = PeriodIndex(["2011-01", "2011-01", "2011-01", NaT, NaT], freq="M")
df = DataFrame({"a": Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=NaT), df)
idx = TimedeltaIndex(["1 days", "2 days", "1 days", NaT, NaT])
df = DataFrame({"a": Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=NaT), df)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) implement downcast
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
df = DataFrame({"a": [1.0, np.nan]})
result = df.fillna(0, downcast="infer")
expected = DataFrame({"a": [1, 0]})
tm.assert_frame_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
df = DataFrame({"a": [1.0, np.nan]})
result = df.fillna({"a": 0}, downcast="infer")
expected = DataFrame({"a": [1, 0]})
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) object upcasting
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.dtypes
expected = Series([np.dtype("object")] * 5, index=[1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
tm.assert_frame_equal(result, expected)
# empty block
df = DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
result = df.fillna("nan")
expected = DataFrame("nan", index=range(3), columns=["A", "B"])
tm.assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame({"A": [1, np.nan], "B": [1.0, 2.0]})
for v in ["", 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test
def test_fillna_datetime_columns(self):
# GH#7095
df = DataFrame(
{
"A": [-1, -2, np.nan],
"B":
|
date_range("20130101", periods=3)
|
pandas.date_range
|
import pytest
import numpy as np
import pandas as pd
from pypbl.elicitation import BayesPreference
from pypbl.priors import Normal, Exponential
@pytest.fixture
def basic_model():
data =
|
pd.DataFrame({'x': [1, 0, 1], 'y': [0, 1, 1]}, index=['item 0', 'item 1', 'item 2'])
|
pandas.DataFrame
|
from more_itertools import unique_everseen
from collections import Counter
from lxml import etree
import pandas as pd
import numpy as np
from iteration import *
import os
def save_yearly_data(years, dirin, dirout):
all_data = []
for y in years:
paths = []
rootdir = dirin+str(y)+'/'
print(rootdir)
for subdir_month, dirs, files in os.walk(rootdir):
paths.append(subdir_month)
year = min(paths, key=len)
paths.remove(year)
monthly_data = []
list_keys = []
for path in paths:
if path != year:
print(path)
monthly = get_all_monthly_data(path)
monthly_data.append(monthly)
list_keys.append(path[-13:-6])
# df_Master = pd.concat(monthly_data, keys=list_keys)
df_Master =
|
pd.concat(monthly_data)
|
pandas.concat
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve, inv
from matplotlib import pyplot as plt
from GridCal.Engine import *
def SysMat(Y, Ys, pq, pvpq):
"""
Computes the system Jacobian matrix in polar coordinates
Args:
        Y: Admittance matrix
        Ys: Series admittance matrix
pq: Array with the indices of the PQ buses
pvpq: Array with the indices of the PV and PQ buses
Returns:
The system Jacobian matrix
"""
A11 = -Ys.imag[np.ix_(pvpq, pvpq)]
A12 = Y.real[np.ix_(pvpq, pq)]
A21 = -Ys.real[np.ix_(pq, pvpq)]
A22 = -Y.imag[np.ix_(pq, pq)]
Asys = sp.vstack([sp.hstack([A11, A12]),
sp.hstack([A21, A22])], format="csc")
return Asys
def compute_acptdf(Ybus, Yseries, Yf, Yt, Cf, V, pq, pv, distribute_slack):
"""
Compute the AC-PTDF
    :param Ybus: admittance matrix
    :param Yseries: series admittance matrix
    :param Yf: Admittance matrix of the buses "from"
    :param Yt: Admittance matrix of the buses "to"
    :param Cf: Connectivity branch - bus "from"
    :param V: voltages array
    :param pq: array of pq node indices
    :param pv: array of pv node indices
    :param distribute_slack: boolean, distribute the slack among all the buses
    :return: AC-PTDF matrix (branches, buses)
"""
n = len(V)
pvpq = np.r_[pv, pq]
npq = len(pq)
# compute the Jacobian
J = SysMat(Ybus, Yseries, pq, pvpq)
if distribute_slack:
dP = np.ones((n, n)) * (-1 / (n - 1))
for i in range(n):
dP[i, i] = 1.0
else:
dP = np.eye(n, n)
    # compose the compatible array (the Q increments are considered zero)
dQ = np.zeros((npq, n))
# dQ = np.eye(n, n)[pq, :]
dS = np.r_[dP[pvpq, :], dQ]
# solve the voltage increments
dx = spsolve(J, dS)
# compute branch derivatives
If = Yf * V
E = V / np.abs(V)
Vdiag = sp.diags(V)
Vdiag_conj = sp.diags(np.conj(V))
Ediag = sp.diags(E)
Ediag_conj = sp.diags(np.conj(E))
If_diag_conj = sp.diags(np.conj(If))
Yf_conj = Yf.copy()
Yf_conj.data = np.conj(Yf_conj.data)
Yt_conj = Yt.copy()
Yt_conj.data = np.conj(Yt_conj.data)
dSf_dVa = 1j * (If_diag_conj * Cf * Vdiag - sp.diags(Cf * V) * Yf_conj * Vdiag_conj)
dSf_dVm = If_diag_conj * Cf * Ediag - sp.diags(Cf * V) * Yf_conj * Ediag_conj
# compose the final AC-PTDF
dPf_dVa = dSf_dVa.real[:, pvpq]
dPf_dVm = dSf_dVm.real[:, pq]
PTDF = sp.hstack((dPf_dVa, dPf_dVm)) * dx
return PTDF
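# Illustrative sketch (assumption, not part of the original file): one column of the
# AC-PTDF is the sensitivity of every branch active power flow to an injection at that bus.
def example_ptdf_sensitivity(PTDF, bus_idx, d_p=1.0):
    """Approximate change in all branch flows when d_p per unit of power is injected at bus bus_idx."""
    return PTDF[:, bus_idx] * d_p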
def make_lodf(circuit: SnapshotCircuit, PTDF, correct_values=True):
"""
:param circuit:
:param PTDF: PTDF matrix in numpy array form
:return:
"""
nl = circuit.nbr
# compute the connectivity matrix
Cft = circuit.C_branch_bus_f - circuit.C_branch_bus_t
H = PTDF * Cft.T
# old code
# h = sp.diags(H.diagonal())
# LODF = H / (np.ones((nl, nl)) - h * np.ones(nl))
# divide each row of H by the vector 1 - H.diagonal
# LODF = H / (1 - H.diagonal())
# replace possible nan and inf
# LODF[LODF == -np.inf] = 0
# LODF[LODF == np.inf] = 0
# LODF = np.nan_to_num(LODF)
# this loop avoids the divisions by zero
# in those cases the LODF column should be zero
LODF = np.zeros((nl, nl))
div = 1 - H.diagonal()
for j in range(H.shape[1]):
if div[j] != 0:
LODF[:, j] = H[:, j] / div[j]
# replace the diagonal elements by -1
# old code
# LODF = LODF - sp.diags(LODF.diagonal()) - sp.eye(nl, nl), replaced by:
for i in range(nl):
LODF[i, i] = - 1.0
if correct_values:
i1, j1 = np.where(LODF > 1)
for i, j in zip(i1, j1):
LODF[i, j] = 1
i2, j2 = np.where(LODF < -1)
for i, j in zip(i2, j2):
LODF[i, j] = -1
return LODF
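# Illustrative sketch (not part of the original file): single-branch outage evaluated
# with the LODF, using the same formula as check_lodf further below.
def example_lodf_single_outage(flows, LODF, c):
    """Post-contingency branch flows after outaging branch c, from base flows and the LODF column of c."""
    return flows + LODF[:, c] * flows[c]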
def get_branch_time_series(circuit: TimeCircuit, PTDF):
"""
:param grid:
:return:
"""
# option 2: call the power directly
P = circuit.Sbus.real
Pbr = np.dot(PTDF, P).T * circuit.Sbase
return Pbr
def multiple_failure_old(flows, LODF, beta, delta, alpha):
"""
:param flows: array of all the pre-contingency flows
:param LODF: Line Outage Distribution Factors Matrix
:param beta: index of the first failed line
:param delta: index of the second failed line
:param alpha: index of the line where you want to see the effects
:return: post contingency flow in the line alpha
"""
# multiple contingency matrix
M = np.ones((2, 2))
M[0, 1] = -LODF[beta, delta]
M[1, 0] = -LODF[delta, beta]
# normal flows of the lines beta and delta
F = flows[[beta, delta]]
# contingency flows after failing the ines beta and delta
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines beta and delta
L = LODF[alpha, :][[beta, delta]]
dFf_alpha = np.dot(L, Ff)
return F[alpha] + dFf_alpha
def multiple_failure(flows, LODF, failed_idx):
"""
From the paper:
Multiple Element Contingency Screening
IEEE TRANSACTIONS ON POWER SYSTEMS, VOL. 26, NO. 3, AUGUST 2011
<NAME> and <NAME>
:param flows: array of all the pre-contingency flows (the base flows)
:param LODF: Line Outage Distribution Factors Matrix
:param failed_idx: indices of the failed lines
:return: all post contingency flows
"""
# multiple contingency matrix
M = -LODF[np.ix_(failed_idx, failed_idx)]
for i in range(len(failed_idx)):
M[i, i] = 1.0
# normal flows of the failed lines indicated by failed_idx
F = flows[failed_idx]
# Affected flows after failing the lines indicated by failed_idx
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines indicated by failed_idx
L = LODF[:, failed_idx]
dFf_alpha = np.dot(L, Ff)
# return the final contingency flow as the base flow plus the contingency flow delta
return flows + dFf_alpha
def get_n_minus_1_flows(circuit: MultiCircuit):
opt = PowerFlowOptions()
branches = circuit.get_branches()
m = circuit.get_branch_number()
Pmat = np.zeros((m, m)) # monitored, contingency
for c, branch in enumerate(branches):
if branch.active:
branch.active = False
pf = PowerFlowDriver(circuit, opt)
pf.run()
Pmat[:, c] = pf.results.Sbranch.real
branch.active = True
return Pmat
def check_lodf(grid: MultiCircuit):
flows_n1_nr = get_n_minus_1_flows(grid)
# assume 1 island
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0]
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=True)
LODF = make_lodf(circuit, PTDF)
Pbus = circuit.get_injections(False).real
flows_n = np.dot(PTDF, Pbus)
nl = circuit.nbr
flows_n1 = np.zeros((nl, nl))
for c in range(nl): # branch that fails (contingency)
# for m in range(nl): # branch to monitor
# flows_n1[m, c] = flows_n[m] + LODF[m, c] * flows_n[c]
flows_n1[:, c] = flows_n[:] + LODF[:, c] * flows_n[c]
return flows_n, flows_n1_nr, flows_n1
def test_ptdf(grid):
"""
    AC-PTDF computation test
:param grid:
:return:
"""
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0] # pick the first island
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=False)
print('PTDF:')
print(PTDF)
if __name__ == '__main__':
from GridCal.Engine import FileOpen
    import pandas as pd
    import sys
    import os
np.set_printoptions(threshold=sys.maxsize, linewidth=200000000)
# np.set_printoptions(linewidth=2000, suppress=True)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
# fname = 'helm_data1.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14 PQ only.gridcal'
# fname = 'IEEE 14 PQ only full.gridcal'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case5.m'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case30.m'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus.gridcal'
grid_ = FileOpen(fname).open()
test_ptdf(grid_)
name = os.path.splitext(fname.split(os.sep)[-1])[0]
method = 'ACPTDF (No Jacobian, V=Vpf)'
nc_ = compile_snapshot_circuit(grid_)
islands_ = split_into_islands(nc_)
circuit_ = islands_[0]
pf_driver_ = PowerFlowDriver(grid_, PowerFlowOptions())
pf_driver_.run()
H_ = compute_acptdf(Ybus=circuit_.Ybus,
Yseries=circuit_.Yseries,
Yf=circuit_.Yf,
Yt=circuit_.Yt,
Cf=circuit_.C_branch_bus_f,
V=pf_driver_.results.voltage,
pq=circuit_.pq,
pv=circuit_.pv,
distribute_slack=False)
LODF_ = make_lodf(circuit_, H_)
if H_.shape[0] < 50:
print('PTDF:\n', H_)
print('LODF:\n', LODF_)
flows_n_, flows_n1_nr_, flows_n1_ = check_lodf(grid_)
# in the case of the grid PGOC_6bus
flows_multiple = multiple_failure(flows=flows_n_,
LODF=LODF_,
failed_idx=[1, 5]) # failed lines 2 and 6
Pn1_nr_df = pd.DataFrame(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.branch_names)
flows_n1_df =
|
pd.DataFrame(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)
|
pandas.DataFrame
|
# REQ 4.2.3.1 (REQ4) Cache TextRank information for future runs where we can compare against an existing sample set instead of regenerating the TextRank graph (this feature has been removed; instead TextRank is just regenerated each time)
# REQ 4.2.3.2 (REQ5) Generate sentence summarization graphs of all MLAs using TextRank algorithm
# REQ 4.3.3.1 (REQ7) Get top N sentences from sentence summarization generated per MLA
import re
from storage_clients import MySqlClient, MinioClient
from preprocess.speech_parser import SpeechParser
from nltk.tokenize import sent_tokenize
from textrank import MLA, Session, Sentence, Summarizer
from pandas import DataFrame
from storage_clients import DbSchema
from pkgutil import get_data
import time
minio_client = MinioClient()
null_sentences = {sentence.strip() for sentence in str(get_data('data', 'sentences.txt').decode('utf-8')).split('\n')}
def run_textrank(mysql_client):
table = DbSchema.ranks
mysql_client.drop_table(table)
mysql_client.create_table(table)
i = 1
s = 0
    startTime = time.perf_counter()
for mla in load_data(mysql_client):
print(f'processing MLA {i} / 87: {mla.firstname} {mla.lastname}')
# loads information from minio to list of MLA classes
summarizer = Summarizer(mla.sentences)
save_to_sql(mla, table, mysql_client)
s += mla.numberOfSentences
i += 1
def load_data(mysql_client):
"""
    Generator that queries MLAs and documents and loads
    speech data from the MinIO instance.
    This allows prefetching of metadata while only querying
    each MLA's underlying speech data at runtime for the
    summarizer.
"""
bucket = 'speeches'
mla_table = mysql_client.read_data("SELECT * FROM mlas")
documents = mysql_client.read_data("SELECT Id, DateCode FROM documents")
for index, row in mla_table.iterrows():
mla = MLA(row.FirstName, row.LastName, row.Caucus, row.Id)
# get sessions contained in files
files = minio_client.list_objects(
bucket, prefix=f'{mla.firstname}_{mla.lastname}', recursive=True)
for file in files:
date_code = file.object_name.split('/')[-1]
document_id = int(
documents.loc[documents['DateCode'] == date_code]['Id'])
session = Session(date_code, mla, document_id)
speeches_from_session = minio_client.get_object(
bucket, file.object_name).read().decode('utf-8')
for sent in sent_tokenize(speeches_from_session):
# Variables
# ---------------------------------------------------------------------------------------------
s = sent.strip()
if s not in null_sentences:
sentence = Sentence(s, session)
yield mla
def save_to_sql(mla, table, mysql_client):
summary_info = []
for session in mla.sessions:
for sentence in session.sentences:
summary_info.append({
'MLAId': mla.id,
'DocumentId': session.id,
'Sentence': str(sentence.text),
'MLARank': sentence.rank,
'Caucus': mla.caucus
})
df =
|
DataFrame(summary_info)
|
pandas.DataFrame
|
"""
Plotting of behavioral metrics during the full task (biased blocks) per lab
<NAME>
6 May 2020
"""
import seaborn as sns
import numpy as np
from os.path import join
import matplotlib.pyplot as plt
from scipy import stats
import scikit_posthocs as sp
from paper_behavior_functions import (figpath, seaborn_style, group_colors, institution_map,
FIGURE_WIDTH, FIGURE_HEIGHT, QUERY,
fit_psychfunc, dj2pandas, load_csv)
import pandas as pd
from statsmodels.stats.multitest import multipletests
# Initialize
seaborn_style()
figpath = figpath()
pal = group_colors()
institution_map, col_names = institution_map()
col_names = col_names[:-1]
# %% Process data
if QUERY is True:
# query sessions
from paper_behavior_functions import query_sessions_around_criterion
from ibl_pipeline import reference, subject, behavior
use_sessions, _ = query_sessions_around_criterion(criterion='ephys',
days_from_criterion=[2, 0],
force_cutoff=True)
session_keys = (use_sessions & 'task_protocol LIKE "%biased%"').fetch('KEY')
ses = ((use_sessions & 'task_protocol LIKE "%biased%"')
* subject.Subject * subject.SubjectLab * reference.Lab
* (behavior.TrialSet.Trial & session_keys))
ses = ses.proj('institution_short', 'subject_nickname', 'task_protocol', 'session_uuid',
'trial_stim_contrast_left', 'trial_stim_contrast_right',
'trial_response_choice', 'task_protocol', 'trial_stim_prob_left',
'trial_feedback_type', 'trial_response_time', 'trial_stim_on_time',
'session_end_time').fetch(
order_by='institution_short, subject_nickname,session_start_time, trial_id',
format='frame').reset_index()
behav = dj2pandas(ses)
behav['institution_code'] = behav.institution_short.map(institution_map)
else:
behav = load_csv('Fig4.csv')
biased_fits = pd.DataFrame()
for i, nickname in enumerate(behav['subject_nickname'].unique()):
if np.mod(i+1, 10) == 0:
print('Processing data of subject %d of %d' % (i+1,
len(behav['subject_nickname'].unique())))
# Get lab
lab = behav.loc[behav['subject_nickname'] == nickname, 'institution_code'].unique()[0]
# Fit psychometric curve
left_fit = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
& (behav['probabilityLeft'] == 80)])
right_fit = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
& (behav['probabilityLeft'] == 20)])
fits = pd.DataFrame(data={'threshold_l': left_fit['threshold'],
'threshold_r': right_fit['threshold'],
'bias_l': left_fit['bias'],
'bias_r': right_fit['bias'],
'lapselow_l': left_fit['lapselow'],
'lapselow_r': right_fit['lapselow'],
'lapsehigh_l': left_fit['lapsehigh'],
'lapsehigh_r': right_fit['lapsehigh'],
'nickname': nickname, 'lab': lab})
biased_fits = biased_fits.append(fits, sort=False)
# %% Statistics
stats_tests =
|
pd.DataFrame(columns=['variable', 'test_type', 'p_value'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
from numpy import nan, float64
from jqfactor_analyzer.prepare import get_clean_factor_and_forward_returns
from jqfactor_analyzer.performance import (
factor_information_coefficient,
factor_autocorrelation,
mean_information_coefficient,
quantile_turnover,
factor_returns, factor_alpha_beta,
average_cumulative_return_by_quantile
)
from jqfactor_analyzer.utils import get_forward_returns_columns
dr = pd.date_range(start='2015-1-1', end='2015-1-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = pd.DataFrame(index=dr,
columns=tickers,
data=[[1, 2, 3, 4],
[4, 3, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
factor_data = pd.DataFrame()
factor_data['factor'] = factor
factor_data['group'] = pd.Series(index=factor.index,
data=[1, 1, 2, 2, 1, 1, 2, 2],)
factor_data['weights'] = pd.Series(range(8), index=factor.index,
dtype=float64) + 1
@pytest.mark.parametrize(
('factor_data', 'forward_returns', 'group_adjust',
'by_group', 'expected_ix', 'expected_ic_val'),
[(factor_data, [4, 3, 2, 1, 1, 2, 3, 4], False, False, dr, [-1., -1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, False, dr, [1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True,
pd.MultiIndex.from_product([dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], True, True,
pd.MultiIndex.from_product([dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.])]
)
def test_information_coefficient(factor_data,
forward_returns,
group_adjust,
by_group,
expected_ix,
expected_ic_val):
factor_data = factor_data.copy()
factor_data['period_1'] = pd.Series(index=factor_data.index,
data=forward_returns)
ic = factor_information_coefficient(factor_data=factor_data,
group_adjust=group_adjust,
by_group=by_group)
expected_ic_df = pd.DataFrame(index=expected_ix,
columns=pd.Index(['period_1'], dtype='object'),
data=expected_ic_val)
pd.testing.assert_frame_equal(ic, expected_ic_df)
@pytest.mark.parametrize(
(
'factor_data', 'forward_returns', 'group_adjust',
'by_group', 'by_time', 'expected_ix', 'expected_ic_val'
), [
(factor_data, [4, 3, 2, 1, 1, 2, 3, 4], False, False, 'D',
dr, [-1., -1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, False, 'W',
pd.DatetimeIndex(['2015-01-04'], name='date', freq='W-SUN'), [1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True, None,
pd.Int64Index([1, 2], name='group'), [1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True, 'W',
pd.MultiIndex.from_product(
[pd.DatetimeIndex(['2015-01-04'], name='date', freq='W-SUN'),
[1, 2]],
names=['date', 'group']
),
[1., 1.])
]
)
def test_mean_information_coefficient(factor_data,
forward_returns,
group_adjust,
by_group,
by_time,
expected_ix,
expected_ic_val):
factor_data = factor_data.copy()
factor_data['period_1'] = pd.Series(index=factor_data.index,
data=forward_returns)
ic = mean_information_coefficient(factor_data,
group_adjust=group_adjust,
by_group=by_group,
by_time=by_time)
expected_ic_df = pd.DataFrame(index=expected_ix,
columns=pd.Index(['period_1']),
data=expected_ic_val)
pd.testing.assert_frame_equal(ic, expected_ic_df,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize(
('quantile_values', 'test_quantile', 'expected_vals'),
[([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
4.0,
[nan, 1.0, 1.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
3.0,
[nan, 0.0, 0.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0]],
2.0,
[nan, 1.0, 1.0, 1.0])]
)
def test_quantile_turnover(quantile_values, test_quantile,
expected_vals):
dr = pd.date_range(start='2015-1-1', end='2015-1-4')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
quantized_test_factor = pd.Series(
pd.DataFrame(index=dr, columns=tickers, data=quantile_values).stack()
)
quantized_test_factor.index = quantized_test_factor.index.set_names(
['date', 'asset']
)
to = quantile_turnover(quantized_test_factor, test_quantile)
expected = pd.Series(
index=quantized_test_factor.index.levels[0], data=expected_vals)
expected.name = test_quantile
pd.testing.assert_series_equal(to, expected)
@pytest.mark.parametrize(
('factor_data', 'factor_vals', 'fwd_return_vals',
'group_adjust', 'expected_vals'),
[(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], [4, 3, 2, 1, 1, 2, 3, 4],
False, [-1.25000, -1.25000]),
(factor_data, [1, 1, 1, 1, 1, 1, 1, 1], [4, 3, 2, 1, 1, 2, 3, 4],
False, [0.0, 0.0]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], [4, 3, 2, 1, 1, 2, 3, 4],
True, [-0.5, -0.5]),
(factor_data, [1, 2, 3, 4, 1, 2, 3, 4], [1, 4, 1, 2, 1, 2, 2, 1],
True, [1.0, 0.0]),
(factor_data, [1, 1, 1, 1, 1, 1, 1, 1], [4, 3, 2, 1, 1, 2, 3, 4],
True, [0.0, 0.0])]
)
def test_factor_returns(factor_data,
factor_vals,
fwd_return_vals,
group_adjust,
expected_vals):
factor_data = factor_data.copy()
factor_data['period_1'] = fwd_return_vals
factor_data['factor'] = factor_vals
factor_returns_s = factor_returns(factor_data=factor_data,
demeaned=True,
group_adjust=group_adjust)
expected = pd.DataFrame(
index=dr,
data=expected_vals,
columns=get_forward_returns_columns(factor_data.columns)
)
pd.testing.assert_frame_equal(factor_returns_s, expected)
@pytest.mark.parametrize(
('factor_data', 'fwd_return_vals', 'alpha', 'beta'),
[(factor_data, [1, 2, 3, 4, 1, 1, 1, 1], -1, 5. / 6.)]
)
def test_factor_alpha_beta(factor_data, fwd_return_vals, alpha, beta):
factor_data = factor_data.copy()
factor_data['period_1'] = fwd_return_vals
ab = factor_alpha_beta(factor_data=factor_data)
expected = pd.DataFrame(columns=['period_1'],
index=['Ann. alpha', 'beta'],
data=[alpha, beta])
pd.testing.assert_frame_equal(ab, expected)
@pytest.mark.parametrize(
('factor_values', 'end_date', 'period', 'expected_vals'),
[([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'2015-1-4', 1,
[nan, 1.0, 1.0, 1.0]),
([[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0]],
'2015-1-4', 1,
[nan, -1.0, -1.0, -1.0]),
([[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0]],
'2015-1-12', 3,
[nan, nan, nan, 1.0, 1.0, 1.0, 0.6, -0.6, -1.0, 1.0, -0.6, -1.0])]
)
def test_factor_autocorrelation(factor_values,
end_date,
period,
expected_vals):
dr = pd.date_range(start='2015-1-1', end=end_date)
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = pd.DataFrame(index=dr,
columns=tickers,
data=factor_values).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor_df = pd.DataFrame()
factor_df['factor'] = factor
fa = factor_autocorrelation(factor_df, period)
expected = pd.Series(index=dr, data=expected_vals)
expected.name = period
pd.testing.assert_series_equal(fa, expected)
@pytest.mark.parametrize(
('before', 'after', 'demeaned', 'quantiles', 'expected_vals'),
[(1, 2, False, 4,
[[1.00, 0.0, -0.50, -0.75],
[0.0, 0.0, 0.0, 0.0],
[0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0],
[-0.20, 0.0, 0.25, 0.5625],
[0.0, 0.0, 0.0, 0.0],
[-0.3333333, 0.0, 0.50, 1.25],
[0.0, 0.0, 0.0, 0.0]]),
(1, 2, True, 4,
[[0.8833333, 0.0, -0.5625, -1.015625],
[0.0, 0.0, 0.0, 0.0],
[-0.1166667, 0.0, -0.0625, -0.265625],
[0.0, 0.0, 0.0, 0.0],
[-0.3166667, 0.0, 0.1875, 0.296875],
[0.0, 0.0, 0.0, 0.0],
[-0.4500000, 0.0, 0.4375, 0.984375],
[0.0, 0.0, 0.0, 0.0]]),
(3, 0, False, 4,
[[7.0, 3.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.488, -0.36, -0.2, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.703704, -0.55555555, -0.333333333, 0.0],
[0.0, 0.0, 0.0, 0.0]]),
(0, 3, True, 4,
[[0.0, -0.5625, -1.015625, -1.488281],
[0.0, 0.0, 0.0, 0.0],
[0.0, -0.0625, -0.265625, -0.613281],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.1875, 0.296875, 0.339844],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.4375, 0.984375, 1.761719],
[0.0, 0.0, 0.0, 0.0]]),
(3, 3, False, 2,
[[3.5, 1.5, 0.5, 0.0, -0.25, -0.375, -0.4375],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.595852, -0.457778, -0.266667, 0.0, 0.375, 0.90625, 1.664062],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
(3, 3, True, 2,
[[2.047926, 0.978888, 0.383333, 0.0, -0.3125, -0.640625, -1.050781],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-2.047926, -0.978888, -0.383333, 0.0, 0.3125, 0.640625, 1.050781],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])]
)
def test_average_cumulative_return_by_quantile(before, after,
demeaned, quantiles,
expected_vals):
dr =
|
pd.date_range(start='2015-1-15', end='2015-2-1')
|
pandas.date_range
|
"""
Copyright 2020 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
'''
Reads in articles from the New York Times API and saves
them to a cache.
'''
import time
import os
import datetime
import argparse
# News archive api
from nytimesarticle import articleAPI
import pandas as pd
import requests
from dateutil.rrule import rrule, MONTHLY
NYT_KEY = open('nyt_key.txt').read().strip()
api = articleAPI(NYT_KEY)
def parse_articles(articles):
'''
This function takes in a response to the NYT api and parses
the articles into a list of dictionaries
'''
news = []
for i in articles['response']['docs']:
if 'abstract' not in i.keys():
continue
if 'headline' not in i.keys():
continue
if 'news_desk' not in i.keys():
continue
if 'pub_date' not in i.keys():
continue
if 'snippet' not in i.keys():
continue
dic = {}
dic['id'] = i['_id']
if i.get('abstract', 'EMPTY') is not None:
dic['abstract'] = i.get('abstract', 'EMPTY').encode("utf8")
dic['headline'] = i['headline']['main'].encode("utf8")
dic['desk'] = i.get('news_desk', 'EMPTY')
if len(i['pub_date']) < 20:
continue
dic['date'] = i['pub_date'][0:10] # cutting time of day.
dic['time'] = i['pub_date'][11:19]
dic['section'] = i.get('section_name', 'EMPTY')
if i['snippet'] is not None:
dic['snippet'] = i['snippet'].encode("utf8")
dic['source'] = i.get('source', 'EMPTY')
dic['type'] = i.get('type_of_material', 'EMPTY')
        dic['word_count'] = i.get('word_count', 0)
news.append(dic)
return pd.DataFrame(news)
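# A minimal sketch of the Archive API response shape that parse_articles()
# expects, inferred from the keys accessed above (not an authoritative NYT
# API reference -- fields beyond those used here are omitted):
#
#   {"response": {"docs": [
#       {"_id": "...", "abstract": "...", "headline": {"main": "..."},
#        "news_desk": "...", "pub_date": "2020-01-01T12:00:00+0000",
#        "snippet": "...", "section_name": "...", "source": "...",
#        "type_of_material": "...", "word_count": 123}]}}
#
# Each surviving doc becomes one row of the returned DataFrame.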
def day_interval(days_back):
today = datetime.datetime.today()
that_day = today - datetime.timedelta(days=days_back)
day_ago = that_day - datetime.timedelta(days=1)
return (int(that_day.strftime('%Y%m%d')), int(day_ago.strftime('%Y%m%d')))
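# Example (hypothetical date): if today were 2020-03-10, day_interval(7)
# would return (20200303, 20200302), i.e. integer YYYYMMDD stamps for the
# target day and the day before it.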
def bulk_look_up(start_year):
# create a list of year, month, pairs for the data
# from start dt to end date inclusive
# Source of API data: https://developer.nytimes.com/docs/archive-product/1/overview
start_dt = datetime.date(start_year, 1, 1)
end_dt = datetime.datetime.today()
dates = [(dt.year, dt.month) for dt in rrule(MONTHLY, dtstart=start_dt, until=end_dt)]
wait = 20
dfs = []
for year, month in dates:
found_df = False
for i in range(20):
try:
url = (
"https://api.nytimes.com/svc/archive/v1/{year}/{month}.json?&api-key={key}"
.format(year=year, month=month, key=NYT_KEY)
)
r = requests.get(url)
df = parse_articles(r.json())
found_df = True
break
            except Exception:
                print(f'Error when getting articles, trying again in {wait} seconds...')
                time.sleep(wait)
                continue
if not found_df:
continue
print('Got {} articles for {}/{}'.format(df.shape[0], month, year))
dfs.append(df)
print(f'Waiting {wait} seconds for next request...')
        time.sleep(wait)
return
|
pd.concat(dfs, ignore_index=True)
|
pandas.concat
|
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
        tup1 = (per.year, per.month, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("A") == ival_B_to_A
assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("H", "S") == ival_B_to_H_start
assert ival_B.asfreq("H", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "S") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("S", "S") == ival_B_to_S_start
assert ival_B.asfreq("S", "E") == ival_B_to_S_end
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq="D", year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7)
ival_D_friday = Period(freq="D", year=2007, month=1, day=5)
ival_D_saturday = Period(freq="D", year=2007, month=1, day=6)
ival_D_sunday = Period(freq="D", year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
ival_D_to_A = Period(freq="A", year=2007)
ival_Deoq_to_AJAN = Period(freq="A-JAN", year=2008)
ival_Deoq_to_AJUN = Period(freq="A-JUN", year=2007)
ival_Deoq_to_ADEC = Period(freq="A-DEC", year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq="M", year=2007, month=1)
ival_D_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_D_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_D_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_D_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_D.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("A-JAN") == ival_Deoq_to_AJAN
assert ival_D_end_of_quarter.asfreq("A-JUN") == ival_Deoq_to_AJUN
assert ival_D_end_of_quarter.asfreq("A-DEC") == ival_Deoq_to_ADEC
assert ival_D_end_of_year.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC
assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC
assert ival_D.asfreq("M") == ival_D_to_M
assert ival_D_end_of_month.asfreq("M") == ival_D_to_M
assert ival_D.asfreq("W") == ival_D_to_W
assert ival_D_end_of_week.asfreq("W") == ival_D_to_W
assert ival_D_friday.asfreq("B") == ival_B_friday
assert ival_D_saturday.asfreq("B", "S") == ival_B_friday
assert ival_D_saturday.asfreq("B", "E") == ival_B_monday
assert ival_D_sunday.asfreq("B", "S") == ival_B_friday
assert ival_D_sunday.asfreq("B", "E") == ival_B_monday
assert ival_D.asfreq("H", "S") == ival_D_to_H_start
assert ival_D.asfreq("H", "E") == ival_D_to_H_end
assert ival_D.asfreq("Min", "S") == ival_D_to_T_start
assert ival_D.asfreq("Min", "E") == ival_D_to_T_end
assert ival_D.asfreq("S", "S") == ival_D_to_S_start
assert ival_D.asfreq("S", "E") == ival_D_to_S_end
assert ival_D.asfreq("D") == ival_D
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_H_end_of_quarter = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_H_end_of_month = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_H_end_of_week = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_H_end_of_day =
|
Period(freq="H", year=2007, month=1, day=1, hour=23)
|
pandas.Period
|
import argparse
import csv
import glob
import itertools
import json
import multiprocessing as mp
import os
import re
import datetime
import subprocess
import sys
import warnings
from functools import partial
from operator import itemgetter
import cxxfilt
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sqlalchemy import create_engine
from sofa_config import *
from sofa_ml import hsg_v1, hsg_v2, swarms_to_sofatrace
from sofa_models import SOFATrace
from sofa_print import *
import random
from DDS.sofa_ds_preprocess import ds_dds_preprocess
sofa_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category"] # 12
def random_generate_color():
rand = lambda: random.randint(0, 255)
return '#%02X%02X%02X' % ( 200, 200, rand())
def list_downsample(list_in, plot_ratio):
new_list = []
for i in range(len(list_in)):
if i % plot_ratio == 0:
# print("%d"%(i))
new_list.append(list_in[i])
return new_list
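# Illustration of the helper above: it keeps every plot_ratio-th element
# (indices 0, plot_ratio, 2*plot_ratio, ...), e.g.
#   list_downsample([0, 1, 2, 3, 4, 5], 2)  ->  [0, 2, 4]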
def trace_init():
t_begin = 0
deviceId = 0
metric = 0
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
name = ''
category = 0
trace = [
t_begin,
event,
metric,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
name,
category]
return trace
def list_to_csv_and_traces(logdir, _list, csvfile, _mode):
traces = []
if len(_list[1:]) > 0:
traces = pd.DataFrame(_list[1:])
traces.columns = sofa_fieldnames
_header = True if _mode == 'w' else False
traces.to_csv(logdir +
csvfile,
mode=_mode,
header=_header,
index=False,
float_format='%.6f')
else:
print_warning('Empty list cannot be exported to %s!' % csvfile)
return traces
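# Sketch of the expected input for the helper above (illustrative values only):
# _list[0] is a placeholder header entry and every later element is a
# 13-field row matching sofa_fieldnames, e.g.
#   rows = [np.empty((len(sofa_fieldnames), 0)).tolist(),
#           [0.0, -1, 0.001, -1, -1, -1, -1, -1, -1, 1234, 1234, 'example_event', 0]]
#   list_to_csv_and_traces(logdir, rows, 'example.csv', 'w')
# The real rows are built by the *_trace_read helpers below.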
# 0/0 [004] 96050.733788: 1 bus-cycles: ffffffff8106315a native_write_msr_safe
# 0/0 [004] 96050.733788: 7 cycles: ffffffff8106315a native_write_msr_safe
# 359342/359342 2493492.850125: 1 bus-cycles: ffffffff8106450a native_write_msr_safe
# 359342/359342 2493492.850128: 1 cycles: ffffffff8106450a
# native_write_msr_safe
def cpu_trace_read(sample, cfg, t_offset, cpu_mhz_xp, cpu_mhz_fp):
fields = sample.split()
event = event_raw = 0
counts = 0
    if re.match(r'\[\d+\]', fields[1]) is not None:
time = float(fields[2].split(':')[0])
func_name = '[%s]'%fields[4].replace('-','_') + fields[6] + fields[7]
counts = float(fields[3])
event_raw = 1.0 * int("0x01" + fields[5], 16)
else:
time = float(fields[1].split(':')[0])
func_name = '[%s]'%fields[3].replace('-','_') + fields[5] + fields[6]
counts = float(fields[2])
event_raw = 1.0 * int("0x01" + fields[4], 16)
if not cfg.absolute_timestamp:
time = time - cfg.time_base
t_begin = time + t_offset
t_end = time + t_offset
if len(cpu_mhz_xp) > 1:
duration = counts/(np.interp(t_begin, cpu_mhz_xp, cpu_mhz_fp)*1e6)
else:
duration = counts/(3000.0*1e6)
event = np.log10(event_raw)
if cfg.perf_events.find('cycles') == -1:
duration = np.log2(event_raw/1e14)
trace = [t_begin, # 0
event, # % 1000000 # 1
duration, # 2
-1, # 3
-1, # 4
0, # 5
0, # 6
-1, # 7
-1, # 8
int(fields[0].split('/')[0]), # 9
int(fields[0].split('/')[1]), # 10
func_name, # 11
0] # 12
return trace
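# Worked example for the duration computed above (illustrative numbers):
# a sample carrying counts = 3.0e6 cycles on a core interpolated at 3000 MHz
# gives duration = 3.0e6 / (3000.0 * 1e6) = 0.001 s, i.e. a 1 ms slice on
# the timeline.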
def net_trace_read(packet, cfg, t_offset):
#21234 1562233011.469681 IP 192.168.88.88.56828 > 172.16.31.10.5400: UDP, length 851
#21235 1562233011.471352 IP 10.57.185.172.8554 > 192.168.88.88.53528: tcp 0
fields = packet.split()
time = float(fields[0])
if not cfg.absolute_timestamp:
time = time - cfg.time_base
t_begin = time + t_offset
t_end = time + t_offset
if fields[1] != 'IP':
return []
protocol = fields[5]
if protocol == 'UDP,':
payload = int(fields[7])
elif protocol == 'tcp':
payload = int(fields[6])
else:
return []
duration = float(payload / 128.0e6)
bandwidth = 128.0e6
pkt_src = 0
pkt_dst = 0
for i in range(4):
pkt_src = pkt_src + \
int(fields[2].split('.')[i]) * np.power(1000, 3 - i)
pkt_dst = pkt_dst + \
int(fields[4].split('.')[i]) * np.power(1000, 3 - i)
trace = [ t_begin,
payload * 100 + 17,
duration,
-1,
-1,
payload,
bandwidth,
pkt_src,
pkt_dst,
-1,
-1,
"network:%s:%d_to_%d_with_%d" % (protocol, pkt_src, pkt_dst, payload),
0
]
return trace
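# Worked example of the address packing above: each dotted octet is weighted
# by 1000**(3 - i), so "192.168.88.88" becomes
#   192*1000**3 + 168*1000**2 + 88*1000 + 88 = 192168088088
# which keeps pkt_src/pkt_dst comparable as plain numbers in the trace table.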
def cuda_api_trace_read(
record,
cfg,
indices,
n_cudaproc,
ts_rescale,
dt_rescale,
payload_unit,
t_offset):
values = record.replace('"', '').split(',')
api_name = '[CUDA_API]' + values[indices.index('Name')]
# print("kernel name = %s" % kernel_name)
time = float(values[indices.index('Start')]) / ts_rescale + t_offset
if not cfg.absolute_timestamp:
time = time - cfg.time_base
duration = float(values[indices.index('Duration')]) / dt_rescale
t_begin = time
t_end = time + duration
payload = 0
bandwidth = 0
pid = n_cudaproc
deviceId = -1
tid = stream_id = -1
pkt_src = pkt_dst = copyKind = 0
# print("%d:%d [%s] ck:%d, %lf,%lf: %d -> %d: payload:%d, bandwidth:%lf,
# duration:%lf "%(deviceId, streamId, kernel_name, copyKind,
# t_begin,t_end, pkt_src, pkt_dst, payload, bandwidth, duration))
trace = [t_begin,
payload * 100 + 17,
duration,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
api_name,
0]
return trace
def gpu_trace_read(
record,
cfg,
indices,
n_cudaproc,
ts_rescale,
dt_rescale,
payload_unit,
t_offset):
values = record.replace('"', '').split(',')
kernel_name = values[indices.index('Name')]
time = float(values[indices.index('Start')]) / ts_rescale + t_offset
if not cfg.absolute_timestamp:
time = time - cfg.time_base
duration = float(values[indices.index('Duration')]) / dt_rescale
t_begin = time
t_end = time + duration
try:
payload = int(float(values[indices.index('Size')]) * payload_unit)
except BaseException:
payload = 0
try:
bandwidth = float(values[indices.index('Throughput')])
except BaseException:
bandwidth = 0
pid = n_cudaproc
deviceId = -1
try:
deviceId = int(float(values[indices.index('Context')]))
except BaseException:
deviceId = -1
tid = stream_id = -1
try:
tid = streamId = int(float(values[indices.index('Stream')]))
except BaseException:
tid = streamId = -1
pkt_src = pkt_dst = copyKind = 0
if kernel_name.find('HtoD') != -1:
copyKind = 1
pkt_src = 0
pkt_dst = deviceId
kernel_name = "CUDA_COPY_H2D_%dB" % (payload)
elif kernel_name.find('DtoH') != -1:
copyKind = 2
pkt_src = deviceId
pkt_dst = 0
kernel_name = "CUDA_COPY_D2H_%dB" % (payload)
elif kernel_name.find('DtoD') != -1:
copyKind = 8
pkt_src = deviceId
pkt_dst = deviceId
kernel_name = "CUDA_COPY_D2D_%dB" % (payload)
elif kernel_name.find('PtoP') != -1:
copyKind = 10
try:
pkt_src = int(values[indices.index('Src Ctx')])
except BaseException:
pkt_src = 0
try:
pkt_dst = int(values[indices.index('Dst Ctx')])
except BaseException:
pkt_dst = 0
kernel_name = "[CUDA_COPY_P2P]from_gpu%d_to_gpu%d_%dB" % (pkt_src, pkt_dst, payload)
else:
copyKind = 0
if deviceId != -1:
kernel_name = '[gpu%d]'%deviceId + kernel_name
trace = [t_begin,
payload * 100 + 17,
duration,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
kernel_name,
0]
return trace
def traces_to_json(traces, path, cfg, pid):
if len(traces) == 0:
print_warning("Empty traces!")
return
with open(path, 'w') as f:
for trace in traces:
if len(trace.data) > 0:
f.write(trace.name + " = ")
trace.data.rename(
columns={
trace.x_field: 'x',
trace.y_field: 'y'},
inplace=True)
sofa_series = {
"name": trace.title,
"color": trace.color,
"data": json.loads(
trace.data.to_json(
orient='records'))}
json.dump(sofa_series, f)
trace.data.rename(
columns={
'x': trace.x_field,
'y': trace.y_field},
inplace=True)
f.write("\n\n")
if cfg.ds:
f.write("sofa_traces%s = [ "%pid)
else:
f.write("sofa_traces = [ ")
for trace in traces:
if len(trace.data) > 0:
f.write(trace.name + ",")
if cfg.ds:
pass
#f.write("hl%s"%pid)
f.write(" ]")
def sofa_preprocess(cfg):
cfg.time_base = 0
t_glb_gpu_base = 0
logdir = cfg.logdir
with open(logdir + 'misc.txt', 'r') as f:
lines = f.readlines()
if len(lines) == 4:
cfg.pid = int(lines[3].split()[1])
else:
print_warning('Incorrect misc.txt content. Some profiling information may not be available.')
if int(os.system('command -v perf 1> /dev/null')) == 0:
with open(logdir + 'perf.script', 'w') as logfile:
subprocess.call(['perf',
'script',
'--kallsym',
'%s/kallsyms' % logdir,
'-i',
'%s/perf.data' % logdir,
'-F',
'time,pid,tid,event,ip,sym,dso,symoff,period,brstack,brstacksym'],
stdout=logfile)
with open(logdir + 'sofa_time.txt') as f:
lines = f.readlines()
cfg.time_base = float(lines[0]) + cfg.cpu_time_offset
print_info(cfg,'Time offset applied to timestamp (s):' + str(cfg.cpu_time_offset))
print_info(cfg,'SOFA global time base (s):' + str(cfg.time_base))
cpu_mhz_xp = [0.0]
cpu_mhz_fp = [3000.0]
#np.interp(2.5, xp, fp)
try:
with open(logdir + 'cpuinfo.txt') as f:
lines = f.readlines()
for line in lines:
fields = line.split()
timestamp = float(fields[0])
mhz = float(fields[1])
cpu_mhz_xp.append(timestamp)
cpu_mhz_fp.append(mhz)
    except Exception:
        print_warning('no cpuinfo file found, default cpu MHz = %lf' % (cpu_mhz_fp[0]))
net_traces = []
cpu_traces = []
cpu_traces_viz = []
blk_d_traces = []
blk_traces = []
vm_usr_traces = []
vm_sys_traces = []
vm_bi_traces = []
    vm_bo_traces = []
vm_in_traces = []
vm_cs_traces = []
vm_wa_traces = []
vm_st_traces = []
mpstat_traces = []
diskstat_traces = []
tx_traces = []
rx_traces = []
strace_traces = []
pystacks_traces = []
nvsmi_sm_traces = []
nvsmi_mem_traces = []
nvsmi_enc_traces = []
nvsmi_dec_traces = []
pcm_pcie_traces = []
pcm_core_traces = []
pcm_memory_traces = []
gpu_traces = []
gpu_traces_viz = []
gpu_api_traces = []
gpu_api_traces_viz = []
gpu_kernel_traces = []
gpu_memcpy_traces = []
gpu_memcpy2_traces = []
gpu_memcpy_h2d_traces = []
gpu_memcpy_d2h_traces = []
gpu_memcpy_d2d_traces = []
gpu_glb_kernel_traces = []
gpu_glb_memcpy_traces = []
gpu_glb_memcpy2_traces = []
gpu_glb_memcpy_h2d_traces = []
gpu_glb_memcpy_d2h_traces = []
gpu_glb_memcpy_d2d_traces = []
ds_traces = []
gpulog_mode = 'w'
gpulog_header = 'True'
cpu_count = mp.cpu_count()
with open('%s/mpstat.txt' % logdir) as f:
        mpstat = np.genfromtxt(f, delimiter=',', invalid_raise=False)
header = mpstat[0]
mpstat = mpstat[1:]
mpstat_list = []
mpstat_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
n_cores = int(mpstat[:,1].max() + 1)
stride = n_cores + 1
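        # Each mpstat snapshot contributes one row per core plus an aggregate
        # row, so consecutive readings of the same core sit `stride` rows
        # apart. Illustration (assumed 4-core machine): stride = 5, and row i
        # below is diffed against row i - 5 to obtain per-interval deltas.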
for i in range(len(mpstat)):
if len(mpstat[i]) < len(header):
continue
if i <= stride or mpstat[i,1] == -1:
continue
#time, cpu, user,nice, system, idle, iowait, irq, softirq
core = mpstat[i,1]
d_mp = mpstat[i,:] - mpstat[i-stride,:]
d_mp_total = d_mp[2] + d_mp[4] + d_mp[5] + d_mp[6] + d_mp[7]
if d_mp_total == 0 :
print_info(cfg, 'No increases in mpstat values')
continue
d_mp_usr = d_mp[2] * 100 / float(d_mp_total)
d_mp_sys = d_mp[4] * 100 / float(d_mp_total)
d_mp_idl = d_mp[5] * 100 / float(d_mp_total)
d_mp_iow = d_mp[6] * 100 / float(d_mp_total)
d_mp_irq = d_mp[7] * 100 / float(d_mp_total)
cpu_time = (d_mp_total - d_mp[5]) * 0.01
t_begin = mpstat[i,0]
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
deviceId = core
metric = cpu_time
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
mpstat_info = 'mpstat_core%d (usr|sys|idl|iow|irq): |%3d|%3d|%3d|%3d|%3d|' % (core, d_mp_usr, d_mp_sys, d_mp_idl, d_mp_iow, d_mp_irq)
trace_usr = [
t_begin,
event,
metric,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
mpstat_info,
0]
mpstat_list.append(trace_usr)
mpstat_traces = list_to_csv_and_traces(logdir, mpstat_list, 'mpstat.csv', 'w')
#==============================================================================
ds_pid = -1
if cfg.ds:
# ds global variables declaration for later raw data processing
with open(logdir + 'pid.txt') as pidfd:
ds_pid = int(pidfd.readline())
ds_dds_traces = ds_dds_preprocess(cfg, logdir, ds_pid)
#==============================================================================
with open('%s/diskstat.txt' % logdir) as f:
diskstats = f.readlines()
diskstat_list = []
diskstat_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
tmp_list = []
for diskstat in diskstats:
m = diskstat[:-1]
m = m.split(',')
tmp_list.append(m)
devs = list(map(lambda x: x[1], tmp_list))
n_dev = len(set(devs))
for i in range(len(diskstats)):
if i < n_dev:
continue
m = diskstats[i][:-1]
m = m.split(',')
dev = m[1]
m_last = diskstats[i-n_dev][:-1]
m_last = m_last.split(',')
secsize=0
# get sector size
try:
f = open('/sys/block/'+dev+'/queue/hw_sector_size')
s = f.readline()
s = re.match("\d+", s)
secsize = int(s.group())
except:
pass
d_read = int(m[2]) - int(m_last[2])
d_read *= secsize
d_write = int(m[3]) - int(m_last[3])
d_write *= secsize
d_disk_total = d_read + d_write #total bytes
if not d_disk_total:
continue
t_begin = float(m_last[0])
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
d_duration = float(m[0]) - float(m_last[0])
# MB/s
d_throughput = round((d_disk_total/d_duration)/float(1024 ** 2),2)
event = -1
duration = d_duration
deviceId = m[1]
copyKind = -1
payload = d_disk_total
bandwidth = d_throughput
pkt_src = -1
pkt_dst = -1
pid = -1
tid = -1
diskstat_info = 'diskstat_dev:%s (read|write): |%3d|%3d| bytes' % (m[1], d_read, d_write)
trace = [
t_begin,
event,
duration,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
diskstat_info,
0]
diskstat_list.append(trace)
diskstat_traces = list_to_csv_and_traces(logdir, diskstat_list, 'diskstat.csv', 'w')
# dev cpu sequence timestamp pid event operation start_block+number_of_blocks process
# <mjr,mnr> number
# 8,0 6 1 0.000000000 31479 A W 691248304 + 1024 <- (8,5) 188175536
# 8,0 6 2 0.000001254 31479 Q W 691248304 + 1024 [dd]
# 8,0 6 3 0.000003353 31479 G W 691248304 + 1024 [dd]
# 8,0 6 4 0.000005004 31479 I W 691248304 + 1024 [dd]
# 8,0 6 5 0.000006175 31479 D W 691248304 + 1024 [dd]
# 8,0 2 1 0.001041752 0 C W 691248304 + 1024 [0]
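    # Sketch of the pairing logic in the block below: 'D' rows mark requests
    # issued to the device and 'C' rows mark completions. A completion is
    # matched to its issue by the starting block number, and the reported
    # latency is completion_time - issue_time; the two sample lines above
    # would pair up with a latency of roughly 1 ms.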
if cfg.blktrace_device is not None:
with open('%s/blktrace.txt' % logdir) as f:
lines = f.readlines()
print_info(cfg,"Length of blktrace = %d" % len(lines))
if len(lines) > 0:
blktrace_d_list = []
blktrace_list = []
blktrace_d_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
blktrace_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
record_error_flag = 0
t = 0
for i in range(len(lines)):
# filter some total calculate information in the below of blktrace.txt file
if len(lines[i]) > 50 and "Read" not in lines[i] and "CPU" not in lines[i] and "IO unplugs" not in lines[i]:
fields = lines[i].split()
blktrace_dev = fields[0]
blktrace_cpu = fields[1]
blktrace_sequence_number = fields[2]
blktrace_timestamp = float(fields[3])
blktrace_pid = fields[4]
blktrace_event = fields[5]
blktrace_operation = fields[6]
try:
blktrace_start_block = int(fields[7])
except:
blktrace_start_block = 0
record_error_flag = 1
pass
# the two column blktrace_block_size and blktrace_process is for future used
if len(fields) > 10:
blktrace_block_size = fields[9]
blktrace_process = fields[10]
t_begin = blktrace_timestamp
deviceId = cpuid = blktrace_cpu
event = blktrace_event
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = blktrace_pid
name_info = 'starting_block='+str(blktrace_start_block)
trace = [
t_begin,
event,
blktrace_start_block,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
name_info,
cpuid]
                        if event == 'D':
blktrace_d_list.append(trace)
                        if event == 'C':
for i in range(len(blktrace_d_list)):
if i==0:
continue
if int(blktrace_d_list[i][2])==int(blktrace_start_block):
time_consume = float(blktrace_timestamp)-float(blktrace_d_list[i][0])
# print('blktrace_d_list[i]:%s'%blktrace_d_list[i])
# print('int(blktrace_timestamp):%f, int(blktrace_d_list[i][0]:%f, time_consume:%f' % (float(blktrace_timestamp), float(blktrace_d_list[i][0]), time_consume))
trace = [
blktrace_d_list[i][0],
event,
float(time_consume),
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
name_info,
cpuid]
blktrace_list.append(trace)
blktrace_d_list[i][11] = 'latency=%0.6f' % float(time_consume)
blk_d_traces = list_to_csv_and_traces(
logdir, blktrace_d_list, 'blktrace.csv', 'w')
blk_traces = list_to_csv_and_traces(
logdir, blktrace_list, 'blktrace.csv', 'a')
if record_error_flag == 1 :
print_warning('blktrace maybe record failed!')
# procs -----------------------memory---------------------- ---swap-- -
# r b swpd free buff cache si so bi bo in cs us sy id wa st
# 2 0 0 400091552 936896 386150912 0 0 3 18 0 1 5 0 95 0 0
# ============ Preprocessing VMSTAT Trace ==========================
with open('%s/vmstat.txt' % logdir) as f:
lines = f.readlines()
print_info(cfg,"Length of vmstat_traces = %d" % len(lines))
if len(lines) > 0:
vm_usr_list = []
vm_sys_list = []
vm_bi_list = []
vm_bo_list = []
vm_in_list = []
vm_cs_list = []
vm_wa_list = []
vm_st_list = []
vm_usr_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_sys_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_bi_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_bo_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_in_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_cs_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_wa_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_st_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
t = 0
t_begin = 0
if not cfg.absolute_timestamp:
t_begin = t - cfg.cpu_time_offset
else:
t_begin = t_begin + t
for i in range(len(lines)):
                if lines[i].find('procs') == -1 and lines[i].find('swpd') == -1:
fields = lines[i].split()
if len(fields) < 17:
continue
vm_r = float(fields[0]) + 1e-5
vm_b = float(fields[1]) + 1e-5
vm_sw = float(fields[2]) + 1e-5
vm_fr = float(fields[3]) + 1e-5
vm_bu = float(fields[4]) + 1e-5
vm_ca = float(fields[5]) + 1e-5
vm_si = float(fields[6]) + 1e-5
vm_so = float(fields[7]) + 1e-5
vm_bi = float(fields[8]) + 1e-5
vm_bo = float(fields[9]) + 1e-5
vm_in = float(fields[10]) + 1e-5
vm_cs = float(fields[11]) + 1e-5
vm_usr = float(fields[12]) + 1e-5
vm_sys = float(fields[13]) + 1e-5
vm_idl = float(fields[14]) + 1e-5
vm_wa = float(fields[15]) + 1e-5
vm_st = float(fields[16]) + 1e-5
deviceId = cpuid = -1
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
vmstat_info = 'r=' + str(int(vm_r)) + '|'\
+ 'b=' + str(int(vm_b)) + '|'\
+ 'sw=' + str(int(vm_sw)) + '|'\
+ 'fr=' + str(int(vm_fr)) + '|'\
+ 'bu=' + str(int(vm_bu)) + '|'\
+ 'ca=' + str(int(vm_ca)) + '|'\
+ 'si=' + str(int(vm_si)) + '|'\
+ 'so=' + str(int(vm_so)) + '|'\
+ 'bi=' + str(int(vm_bi)) + '|'\
+ 'bo=' + str(int(vm_bo)) + '|'\
+ 'in=' + str(int(vm_in)) + '|'\
+ 'cs=' + str(int(vm_cs)) + '|'\
+ 'usr=' + str(int(vm_usr)) + '|'\
+ 'sys=' + str(int(vm_sys)) + '|'\
+ 'idl=' + str(int(vm_idl)) + '|'\
+ 'wa=' + str(int(vm_wa)) + '|'\
+ 'st=' + str(int(vm_st))
trace = [
t_begin,
event,
vm_bi,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_bi_list.append(trace)
trace = [
t_begin,
event,
vm_bo,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_bo_list.append(trace)
trace = [
t_begin,
event,
vm_in,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_in_list.append(trace)
trace = [
t_begin,
event,
vm_cs,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_cs_list.append(trace)
trace = [
t_begin,
event,
vm_wa,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_wa_list.append(trace)
trace = [
t_begin,
event,
vm_st,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_st_list.append(trace)
trace = [
t_begin,
event,
vm_usr,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_usr_list.append(trace)
trace = [
t_begin,
event,
vm_sys,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_sys_list.append(trace)
t_begin = t_begin + 1
vm_bi_traces = list_to_csv_and_traces(
logdir, vm_bi_list, 'vmstat.csv', 'w')
vm_bo_traces = list_to_csv_and_traces(
logdir, vm_bo_list, 'vmstat.csv', 'a')
vm_in_traces = list_to_csv_and_traces(
logdir, vm_in_list, 'vmstat.csv', 'a')
vm_cs_traces = list_to_csv_and_traces(
logdir, vm_cs_list, 'vmstat.csv', 'a')
vm_wa_traces = list_to_csv_and_traces(
logdir, vm_wa_list, 'vmstat.csv', 'a')
vm_st_traces = list_to_csv_and_traces(
logdir, vm_st_list, 'vmstat.csv', 'a')
vm_usr_traces = list_to_csv_and_traces(
logdir, vm_usr_list, 'vmstat.csv', 'a')
vm_sys_traces = list_to_csv_and_traces(
logdir, vm_sys_list, 'vmstat.csv', 'a')
# timestamp, name, index, utilization.gpu [%], utilization.memory [%]
# 2019/05/16 16:49:04.650, GeForce 940MX, 0, 0 %, 0 %
if os.path.isfile('%s/nvsmi_query.txt' % logdir):
with open('%s/nvsmi_query.txt' % logdir) as f:
next(f)
lines = f.readlines()
nvsmi_query_has_data = True
for line in lines:
if line.find('failed') != -1 or line.find('Failed') != -1:
nvsmi_query_has_data = False
print_warning('No nvsmi query data.')
break
if nvsmi_query_has_data:
print_info(cfg,"Length of nvsmi_query_traces = %d" % len(lines))
nvsmi_sm_list = []
nvsmi_mem_list = []
nvsmi_sm_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
nvsmi_mem_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
for i in range(len(lines)):
fields = lines[i].split(',')
nv_time = fields[0]
nv_time = datetime.datetime.strptime(nv_time, '%Y/%m/%d %H:%M:%S.%f').timestamp() + cfg.nvsmi_time_zone * 3600
nvsmi_id = int(fields[2])
nvsmi_sm = int(fields[3][:-2])
nvsmi_mem = int(fields[4][:-2])
# nvtime
t_begin = nv_time
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
deviceId = cpuid = nvsmi_id
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
sm_info = "GPUID_sm=%d_%d" % (nvsmi_id, nvsmi_sm)
mem_info = "GPUID_mem=%d_%d" % (nvsmi_id, nvsmi_mem)
trace = [
t_begin,
0,
nvsmi_sm,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
sm_info,
cpuid]
nvsmi_sm_list.append(trace)
trace = [
t_begin,
1,
nvsmi_mem,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
mem_info,
cpuid]
nvsmi_mem_list.append(trace)
if len(nvsmi_sm_list)>1:
nvsmi_sm_traces = list_to_csv_and_traces(logdir, nvsmi_sm_list, 'nvsmi_trace.csv', 'w')
nvsmi_mem_traces = list_to_csv_and_traces(logdir, nvsmi_mem_list, 'nvsmi_trace.csv', 'a')
# gpu sm mem enc dec
# Idx % % % %
# 0 0 0 0 0
# 1 0 0 0 0
# 2 0 0 0 0
if os.path.isfile('%s/nvsmi.txt' % logdir):
with open('%s/nvsmi.txt' % logdir) as f:
lines = f.readlines()
nvsmi_has_data = True
for line in lines:
if line.find('failed') != -1 or line.find('Failed') != -1:
nvsmi_has_data = False
print_warning('No nvsmi data.')
break
if nvsmi_has_data:
print_info(cfg,"Length of nvsmi_traces = %d" % len(lines))
nvsmi_enc_list = []
nvsmi_dec_list = []
nvsmi_enc_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
nvsmi_dec_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
t = 0
for i in range(len(lines)):
if lines[i].find('gpu') == -1 and lines[i].find('Idx') == -1:
fields = lines[i].split()
if len(fields) < 5:
continue
nvsmi_id = int(fields[0])
if fields[3] == '-':
nvsmi_enc = int(0)
else:
nvsmi_enc = int(fields[3])
if fields[4] == '-':
nvsmi_dec = int(0)
else:
nvsmi_dec = int(fields[4])
if cfg.absolute_timestamp:
t_begin = t + cfg.time_base
else:
t_begin = t
deviceId = cpuid = nvsmi_id
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
enc_info = "GPUID_enc=%d_%d" % (nvsmi_id, nvsmi_enc)
dec_info = "GPUID_dec=%d_%d" % (nvsmi_id, nvsmi_dec)
trace = [
t_begin,
2,
nvsmi_enc,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
enc_info,
cpuid]
if t > 3 :
nvsmi_enc_list.append(trace)
trace = [
t_begin,
3,
nvsmi_dec,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
dec_info,
cpuid]
if t > 3 :
nvsmi_dec_list.append(trace)
if nvsmi_id == 0:
t = t + 1
if len(nvsmi_enc_list)>1:
cfg.nvsmi_data = True
nvsmi_enc_traces = list_to_csv_and_traces(logdir, nvsmi_enc_list, 'nvsmi_trace.csv', 'a')
nvsmi_dec_traces = list_to_csv_and_traces(logdir, nvsmi_dec_list, 'nvsmi_trace.csv', 'a')
else:
print_warning("Program exectution time is fewer than 3 seconds, so nvsmi trace analysis will not be displayed.")
# ============ Preprocessing Network Trace ==========================
if os.path.isfile('%s/sofa.pcap' % logdir):
with open(logdir + 'net.tmp', 'w') as f:
subprocess.check_call(
["tcpdump", "-q", "-n", "-tt", "-r",
"%s/sofa.pcap"%logdir ], stdout=f, stderr=subprocess.DEVNULL)
with open(logdir + 'net.tmp') as f:
packets = lines = f.readlines()
print_info(cfg,"Length of net_traces = %d" % len(packets))
if packets:
with mp.Pool(processes=cpu_count) as pool:
res = pool.map(
partial(
net_trace_read,
cfg=cfg,
t_offset=0),
packets)
res_viz = list_downsample(res, cfg.plot_ratio)
net_traces = pd.DataFrame(res_viz)
net_traces.columns = sofa_fieldnames
net_traces.to_csv(
logdir + 'nettrace.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
# ============ Apply for Network filter =====================
if cfg.net_filters:
filtered_net_groups = []
packet_not_zero = net_traces['payload'] > 0
start = (net_traces['pkt_src'] == float(cfg.net_filters[0]))
for filter in cfg.net_filters[1:]:
end = (net_traces['pkt_dst'] == float(filter))
group = net_traces[packet_not_zero & start & end]
filtered_net_groups.append({'group': group,
'color': 'rgba(%s,%s,%s,0.8)' %(random.randint(0,255),random.randint(0,255),random.randint(0,255)),
'keyword': 'to_%s' %filter})
end = (net_traces['pkt_dst'] == float(cfg.net_filters[0]))
for filter in cfg.net_filters[1:]:
start = (net_traces['pkt_src'] == float(filter))
group = net_traces[packet_not_zero & start & end]
filtered_net_groups.append({'group': group,
'color': 'rgba(%s,%s,%s,0.8)' %(random.randint(0,255),random.randint(0,255),random.randint(0,255)),
'keyword': 'from_%s' %filter})
else:
print_warning("no network traces were recorded.")
# ============ Preprocessing Network Bandwidth Trace ============
with open('%s/netstat.txt' % logdir) as f:
lines = f.readlines()
if lines:
tmp_time = float(lines[0].split(',')[0])
tmp_tx = int(lines[0].split(',')[1])
tmp_rx = int(lines[0].split(',')[2])
all_time = []
all_tx = []
all_rx = []
tx_list = []
rx_list = []
bandwidth_result = pd.DataFrame([], columns=['time', 'tx_bandwidth', 'rx_bandwidth'])
for line in lines[1:]:
time = float(line.split(',')[0])
tx = int(line.split(',')[1])
rx = int(line.split(',')[2])
tx_bandwidth = (tx - tmp_tx) / (time - tmp_time)
rx_bandwidth = (rx - tmp_rx) / (time - tmp_time)
#sofa_fieldnames = [
# "timestamp", # 0
# "event", # 1
# "duration", # 2
# "deviceId", # 3
# "copyKind", # 4
# "payload", # 5
# "bandwidth", # 6
# "pkt_src", # 7
# "pkt_dst", # 8
# "pid", # 9
# "tid", # 10
# "name", # 11
# "category"] # 12
t_begin = time
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
trace = [
t_begin, # timestamp
0, # event
-1,
-1,
-1,
-1,
tx_bandwidth, # tx bandwidth
-1,
-1,
-1,
-1,
"network_bandwidth_tx(bytes):%d" % tx_bandwidth,
0
]
tx_list.append(trace)
trace = [
t_begin, # timestamp
1, # event
-1,
-1,
-1,
-1,
rx_bandwidth, # rx bandwidth
-1,
-1,
-1,
-1,
"network_bandwidth_rx(bytes):%d" % rx_bandwidth,
0
]
rx_list.append(trace)
# for visualize
all_time.append(time)
all_tx.append(tx_bandwidth)
all_rx.append(rx_bandwidth)
# for pandas
result = [t_begin, tx_bandwidth, rx_bandwidth]
tmp_bandwidth_result = pd.DataFrame([result], columns=['time', 'tx_bandwidth', 'rx_bandwidth'])
bandwidth_result = pd.concat([bandwidth_result, tmp_bandwidth_result], ignore_index=True)
bandwidth_result.to_csv('%s/netbandwidth.csv' %logdir, header=True)
# prepare for next round loop
tmp_time = time
tmp_tx = tx
tmp_rx = rx
tx_traces = pd.DataFrame(tx_list, columns = sofa_fieldnames)
tx_traces.to_csv(
logdir + 'netstat.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
rx_traces = pd.DataFrame(rx_list, columns = sofa_fieldnames)
rx_traces.to_csv(
logdir + 'netstat.csv',
mode='a',
header=False,
index=False,
float_format='%.6f')
# ============ Preprocessing GPU Trace ==========================
num_cudaproc = 0
filtered_gpu_groups = []
indices = []
for nvvp_filename in glob.glob(logdir + "gputrace*[0-9].nvvp"):
print_progress("Read " + nvvp_filename + " by nvprof -- begin")
with open(logdir + "gputrace.tmp", "w") as f:
subprocess.call(["nvprof", "--csv", "--print-gpu-trace", "-i", nvvp_filename], stderr=f)
#Automatically retrieve the timestamp of the first CUDA activity(e.g. kernel, memory op, etc..)
engine = create_engine("sqlite:///"+nvvp_filename)
t_glb_gpu_bases = []
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_MEMSET',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO MEMSET')
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_MEMCPY',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO MEMCPY')
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO CONCURRENT KERNEL')
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_KERNEL',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO KERNEL')
if len(t_glb_gpu_bases) > 0:
t_glb_gpu_base = sorted(t_glb_gpu_bases)[0]*1.0/1e+9
else:
print_warning("There is no data in tables of NVVP file.")
t_glb_gpu_base = 0  # fall back to zero so the later print/offset does not hit an undefined name
print_info(cfg,"Timestamp of the first GPU trace = " + str(t_glb_gpu_base))
print_progress("Read " + nvvp_filename + " by nvprof -- end")
num_cudaproc = num_cudaproc + 1
with open(logdir + 'gputrace.tmp') as f:
records = f.readlines()
# print(records[1])
if len(records) > 0 and records[1].split(',')[0] == '"Start"':
indices = records[1].replace(
'"', '').replace(
'\n', '').split(',')
# ms,ms,,,,,,,,B,B,MB,GB/s,,,,
payload_unit = 1
if records[2].split(',')[11] == 'GB':
payload_unit = np.power(1024,3)
elif records[2].split(',')[11] == 'MB':
payload_unit = np.power(1024,2)
elif records[2].split(',')[11] == 'KB':
payload_unit = np.power(1024,1)
elif records[2].split(',')[11] == 'B':
payload_unit = 1
else:
print_info(cfg,"The payload unit in gputrace.tmp was not recognized!")
sys.exit(1)
ts_rescale = 1.0
if records[2].split(',')[0] == 'ms':
ts_rescale = 1.0e3
elif records[2].split(',')[0] == 'us':
ts_rescale = 1.0e6
dt_rescale = 1.0
if records[2].split(',')[1] == 'ms':
dt_rescale = 1.0e3
elif records[2].split(',')[1] == 'us':
dt_rescale = 1.0e6
records = records[3:]
print_info(cfg,"Length of gpu_traces = %d" % len(records))
t_base = float(records[0].split(',')[0])
with mp.Pool(processes=cpu_count) as pool:
res = pool.map(
partial(
gpu_trace_read,
cfg=cfg,
indices=indices,
ts_rescale=ts_rescale,
dt_rescale=dt_rescale,
payload_unit=payload_unit,
n_cudaproc=num_cudaproc,
t_offset=t_glb_gpu_base -
t_base),
records)
gpu_traces = pd.DataFrame(res)
gpu_traces.columns = sofa_fieldnames
res_viz = list_downsample(res, cfg.plot_ratio)
gpu_traces_viz = pd.DataFrame(res_viz)
gpu_traces_viz.columns = sofa_fieldnames
gpu_traces.to_csv(
logdir + 'gputrace.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
# Apply filters for GPU traces
df_grouped = gpu_traces.groupby('name')
for filter in cfg.gpu_filters:
group = gpu_traces[gpu_traces['name'].str.contains(
filter.keyword)]
filtered_gpu_groups.append({'group': group, 'color': filter.color,
'keyword': filter.keyword})
else:
print_warning(
"gputrace existed, but no kernel traces were recorded.")
os.system('cat %s/gputrace.tmp' % logdir)
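# Illustrative sketch (not from sofa): the if/elif chains above map the unit row of
# nvprof's CSV output to scale factors. The same mapping written as dict lookups;
# 's'/'ms'/'us' and 'B'/'KB'/'MB'/'GB' are the unit strings handled above, and the
# rescale factors are presumably divided into the raw values inside gpu_trace_read().
_TS_RESCALE = {'s': 1.0, 'ms': 1.0e3, 'us': 1.0e6}
_PAYLOAD_UNIT = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3}
# e.g. a timestamp column labelled 'ms' would use ts_rescale = _TS_RESCALE['ms']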
# ============ Preprocessing GPU API Trace ==========================
if cfg.cuda_api_tracing:
num_cudaproc = 0
indices = []
for nvvp_filename in glob.glob(logdir + "gputrace*[0-9].nvvp"):
print_progress("Read " + nvvp_filename + " for API traces by nvprof -- begin")
with open(logdir + "cuda_api_trace.tmp", "w") as f:
subprocess.call(["nvprof", "--csv", "--print-api-trace", "-i", nvvp_filename], stderr=f)
# Automatically retrieve the timestamp of the first CUDA activity (e.g. kernel, memory op, etc.)
engine = create_engine("sqlite:///"+nvvp_filename)
t_glb_gpu_bases = []
first_corid = 1
try:
t_glb_gpu_bases.append((pd.read_sql_table('CUPTI_ACTIVITY_KIND_RUNTIME',engine)).iloc[0]['start'])
first_corid = (pd.read_sql_table('CUPTI_ACTIVITY_KIND_RUNTIME',engine)).iloc[0]['correlationId']
except BaseException:
print_info(cfg,'NO RUNTIME')
if len(t_glb_gpu_bases) > 0:
t_glb_gpu_base = sorted(t_glb_gpu_bases)[0]*1.0/1e+9
else:
print_warning("There is no data in tables of NVVP file.")
t_glb_gpu_base = 0  # fall back to zero in case no earlier value of t_glb_gpu_base exists
print_info(cfg,"Timestamp of the first CUDA API trace = " + str(t_glb_gpu_base))
print_progress("Read " + nvvp_filename + " by nvprof -- end")
num_cudaproc = num_cudaproc + 1
with open(logdir + 'cuda_api_trace.tmp') as f:
records = f.readlines()
# print(records[1])
if len(records) > 0 and records[1].split(',')[0] == '"Start"':
indices = records[1].replace(
'"', '').replace(
'\n', '').split(',')
ts_rescale = 1.0
if records[2].split(',')[0] == 'ms':
ts_rescale = 1.0e3
elif records[2].split(',')[0] == 'us':
ts_rescale = 1.0e6
dt_rescale = 1.0
if records[2].split(',')[1] == 'ms':
dt_rescale = 1.0e3
elif records[2].split(',')[1] == 'us':
dt_rescale = 1.0e6
records = records[3:]
print_info(cfg,"Length of cuda_api_traces = %d" % len(records))
#TODO: Apply parallel search to speed up
t_base = float(records[0].split(',')[0])
if len(records[0].split(',')) == 4:
for record in records:
if int(record.split(',')[3]) == first_corid:
t_base = float(record.split(',')[0])
print_info(cfg,'First Correlation_ID ' + str(first_corid) + ' is found in cuda_api_trace.tmp')
print_info(cfg,'First API trace timestamp is ' + str(t_base))
break
with mp.Pool(processes=cpu_count) as pool:
res = pool.map(
partial(
cuda_api_trace_read,
cfg=cfg,
indices=indices,
ts_rescale=ts_rescale,
dt_rescale=dt_rescale,
payload_unit=payload_unit,
n_cudaproc=num_cudaproc,
t_offset=t_glb_gpu_base -
t_base),
records)
cuda_api_traces = pd.DataFrame(res)
cuda_api_traces.columns = sofa_fieldnames
res_viz = list_downsample(res, cfg.plot_ratio)
cuda_api_traces_viz =
|
pd.DataFrame(res_viz)
|
pandas.DataFrame
|
# Import standard python packages
import numbers
import copy
import pandas as pd
import pathlib
import numpy as np
import sys
# EIA reports coal counties using the FIPS Codes for the county. The county can be a one, two, or three digit number.
# For standardization sake, we convert them all to a three digit number.
# This function takes one input: an array of FIPS county codes.
# This function returns one output: an array of three-digit FIPS county codes
# This function is used in the following codes: eia_coal_consumption_data.py
def convert_fips_county_three_digits(fips_codes):
fips_three = []
for county_fips in fips_codes:
if len(str(int(county_fips))) == 1:
fips_three.append('00' + str(int(county_fips)))
elif len(str(int(county_fips))) == 2:
fips_three.append('0' + str(int(county_fips)))
elif len(str(int(county_fips))) == 3:
fips_three.append(str(int(county_fips)))
fips_three = pd.Series(fips_three)
fips_three = fips_three.values
return fips_three
def convert_fips_state_two_digits(fips_codes):
fips_two = []
for state_fips in fips_codes:
if len(str(int(state_fips))) == 1:
fips_two.append('0' + str(int(state_fips)))
elif len(str(int(state_fips))) == 2:
fips_two.append(str(int(state_fips)))
fips_two = pd.Series(fips_two)
fips_two = fips_two.values
return fips_two
# FIPS county codes can be one to three digits. The standard way of reporting them is to report them with three digits
# with preceding zeros. This function converts adds the preceding zeros to the county codes in an array if necessary.
# It then combines the fips code with the state abbreviation.
# This function takes two inputs: a pandas array of FIPS county codes and a pandas array of state abbreviations.
# This function returns one output: a pandas array of State Abbreviation and FIPS county codes.
# This function is used in the following codes: CFPP_fuel_data_processing_2015.py, CFPP_fuel_data_processing.py
def fips_codes_state_county_codes(fips_county_codes, state_abbreviations):
i = 0
state_county_codes = []
while i < len(fips_county_codes):
if isinstance(fips_county_codes.iloc[i], numbers.Number):
code = int(fips_county_codes.iloc[i])
if fips_county_codes.iloc[i] / 100 >= 1:
state_county_codes.append(state_abbreviations.iloc[i] + ', ' + str(code))
elif fips_county_codes.iloc[i] / 10 >= 1:
state_county_codes.append(state_abbreviations.iloc[i] + ', 0' + str(code))
elif fips_county_codes.iloc[i] / 1 >= 0:
state_county_codes.append(state_abbreviations.iloc[i] + ', 00' + str(code))
else:
state_county_codes.append(state_abbreviations.iloc[i] + ', ' + str(fips_county_codes.iloc[i]))
i += 1
state_county_codes = pd.Series(state_county_codes)
state_county_codes = state_county_codes.values
return state_county_codes
# EIA reports coal rank using a three letter abbreviations. COALQUAL reports everything using the full rank name.
# This function converts those three letter abbreviations to the full rank name (in all caps).
# This function takes one inputs: a pandas array of coal rank abbreviations.
# This function returns one output: a pandas array of coal ranks.
# This function is used in the following codes: CFPP_fuel_data_processing_2015.py, CFPP_fuel_data_processing.py
def rank_abbreviation_to_full_name(coal_rank_abbreviations):
i = 0
fuel_abbreviation = []
while i < len(coal_rank_abbreviations):
if coal_rank_abbreviations.iloc[i] == 'BIT':
fuel_abbreviation.append('BITUMINOUS')
elif coal_rank_abbreviations.iloc[i] == 'SUB':
fuel_abbreviation.append('SUBBITUMINOUS')
elif coal_rank_abbreviations.iloc[i] == 'LIG':
fuel_abbreviation.append('LIGNITE')
i += 1
fuel_abbreviation = pd.Series(fuel_abbreviation)
fuel_abbreviation = fuel_abbreviation.values
return fuel_abbreviation
# EIA and coal mine data includes both county names and county codes, but we need to create a merge key that has both
# these county identifiers and the relevant state. This code concatenates these functions.
# This function takes two inputs: two arrays to concatenate with a comma between them.
# This function returns one input: an array of the concatenated strings.
# This function is used in the following codes: eia_coal_consumption_data.py
def fips_code_county_name_state_concatenation(identifiers_1, identifiers_2):
concatenated_strings = []
i = 0
while i < len(identifiers_1):
if not isinstance(identifiers_1.iloc[i], str):
identifier_1 = str(identifiers_1.iloc[i])
else:
identifier_1 = identifiers_1.iloc[i]
if not isinstance(identifiers_2.iloc[i], str):
identifier_2 = str(identifiers_2.iloc[i])
else:
identifier_2 = identifiers_2.iloc[i]
concatenated_strings.append(identifier_1 + ", " + identifier_2)
i += 1
concatenated_strings = pd.Series(concatenated_strings)
concatenated_strings = concatenated_strings.values
return concatenated_strings
def state_county_fips_code_concatenation(identifiers_1, identifiers_2):
concatenated_strings = []
i = 0
while i < len(identifiers_1):
if not isinstance(identifiers_1.iloc[i], str):
identifier_1 = str(identifiers_1.iloc[i])
else:
identifier_1 = identifiers_1.iloc[i]
if not isinstance(identifiers_2.iloc[i], str):
identifier_2 = str(identifiers_2.iloc[i])
else:
identifier_2 = identifiers_2.iloc[i]
concatenated_strings.append(identifier_1 + identifier_2)
i += 1
concatenated_strings = pd.Series(concatenated_strings)
concatenated_strings = concatenated_strings.values
return concatenated_strings
def state_code_to_abbreviation(series):
state_dic = {1:"AL", 2: 'AK', 3: 'IM', 4: 'AZ', 5: 'AR', 6: 'CA', 8: 'CO', 9: 'CT', 10: 'DE', 11: 'DC', 12: 'FL', 13: 'GA', 15: 'HI', 16: 'ID', 17: 'IL', 18: 'IN', 19: 'IA', 20: 'KS', 21: 'KY', 22: 'LA', 23: 'ME', 24: 'MD', 25: 'MA', 26: 'MI', 27: 'MN', 28: 'MS', 29: 'MO', 30: 'MT', 31: 'NE', 32: 'NV', 33: 'NH', 34: 'NJ', 35: 'NM', 36: 'NY', 37: 'NC', 38: 'ND', 39: 'OH', 40: 'OK', 41: 'OR', 42: 'PA', 44: 'RI', 45: 'SC', 46: 'SD', 47: 'TN', 48: 'TX', 49: 'UT', 50: 'VT', 51: 'VA', 53: 'WA', 54: 'WV', 55: 'WI', 56: 'WY'}
i = 0
temp = []
while i < len(series):
state = state_dic[series.iloc[i]]
temp.append(state)
i = i + 1
return pd.Series(temp)
def data_filtering(dataframe, capacity, outputfile):
# Filter plants that (1) don't use coal and (2) use either imported coal (IMP) or waste coal (WC).
if type(dataframe.Fuel_Group.iloc[2]) != str:
dataframe = dataframe[dataframe.Fuel_Group == 1]
temp = ['Coal'] * len(dataframe.Fuel_Group)
fuel =
|
pd.Series(temp)
|
pandas.Series
|
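# Illustrative usage sketch (not part of the original module), assuming the helper
# functions defined above are available; the county codes and coal ranks below are
# made up for the example.
import pandas as pd
example_counties = pd.Series([5, 45, 123])
print(convert_fips_county_three_digits(example_counties))  # ['005' '045' '123']
example_ranks = pd.Series(['BIT', 'SUB', 'LIG'])
print(rank_abbreviation_to_full_name(example_ranks))  # ['BITUMINOUS' 'SUBBITUMINOUS' 'LIGNITE']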
import pandas as pd
import numpy as np
import datetime as dt
import pytz
def make_df(days_ago: int, df_len: int) -> pd.DataFrame:
"""
Make a dataframe similar to the online csv
Parameters
----------
days_ago : int
How many days ago the df should start at
df_len : int
How long the df should be
Returns
-------
pd.DataFrame
A df with a datetime index
"""
start_date = dt.datetime.now(pytz.utc) - dt.timedelta(days=days_ago)
times = [start_date + dt.timedelta(days=day + 1) for day in range(df_len)]
values = [[val, val + 1] for val in range(df_len)]
df = pd.DataFrame(values, times)
return df
def make_full_df(
days_ago_start: int = 100,
len_cluster: int = 2,
num_clusters: int = 6,
cluster_interval: int = 14,
) -> pd.DataFrame:
df_all =
|
pd.DataFrame()
|
pandas.DataFrame
|
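# Illustrative usage sketch (not part of the original module), assuming make_df from
# above is importable: build a 3-row frame whose tz-aware index starts ten days ago
# and whose two columns count up from 0.
df_example = make_df(days_ago=10, df_len=3)
print(df_example.index.tz)  # UTC
print(df_example.shape)     # (3, 2)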
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix
from skmultilearn.problem_transform import ClassifierChain
from skmultilearn.problem_transform import BinaryRelevance
from skmultilearn.adapt import MLkNN
from keras.layers import Dense
from keras.models import Sequential
from keras.metrics import *
##########################################################
# Section 1 - Data Loading
##########################################################
# Getting feature data
finalData = np.array(pd.read_csv('D:/UIP/finaldata.csv', index_col='Name'))
biodata = finalData[:, 21:]
# Getting type data as dataframe for visualisations
pType = pd.read_csv('D:/UIP/primType.csv', index_col=0)
sType = pd.read_csv('D:/UIP/secondType.csv', index_col=0)
bTypes = pd.read_csv('D:/UIP/sparseTypes.csv', index_col=0)
# Getting features as numpy arrays for model inputs
primType = np.array(pType)
secType = np.array(sType)
bothTypes = np.array(bTypes)
# Get splitted data
Xtrain, Xtest, Ytrain, Ytest = train_test_split(finalData, bothTypes, test_size=0.2, random_state=12345)
XtrainPrim, XtestPrim, YtrainPrim, YtestPrim = train_test_split(finalData, primType, test_size=0.2, random_state=12345)
XtrainSec, XtestSec, YtrainSec, YtestSec = train_test_split(finalData, secType, test_size=0.2, random_state=12345)
# Get splitted biodata
XtrainBio, XtestBio, YtrainBio, YtestBio = train_test_split(biodata, bothTypes, test_size=0.2, random_state=12345)
XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio = train_test_split(biodata, primType, test_size=0.2, random_state=12345)
XtrainSecBio, XtestSecBio, YtrainSecBio, YtestSecBio = train_test_split(biodata, secType, test_size=0.2, random_state=12345)
##########################################################
# Section 2 - Data Visualisation
##########################################################
# Visualising class distribution for Pokemon type
def visualiseTypeDist(typeData, nat):
# Type Categories
categories = list(typeData.columns.values)
plt.figure(figsize=(15, 8))
ax = sns.barplot(categories, typeData.sum().values)
# Axis labels
if nat == 1:
plt.title("Distribution of Primary Pokemon Types", fontsize=14)
elif nat == 2:
plt.title("Distribution of Secondary Pokemon Types", fontsize=14)
else:
plt.title("Distribution of Pokemon Types (single and dual)", fontsize=14)
plt.ylabel('Pokemon of that Type', fontsize=14)
plt.xlabel('Pokemon Type', fontsize=14)
rects = ax.patches
labels = typeData.sum().values
# Print hist labels
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 1,
label, ha='center', va='bottom', fontsize=12)
plt.show()
visualiseTypeDist(pType, 1)
visualiseTypeDist(sType, 2)
visualiseTypeDist(bTypes, 0)
# Function to re-encode output of Neural Network into one-hot encoding
def reEncode(predictions):
newOut = np.ndarray((len(predictions), len(predictions[0])))
for i in range(len(predictions)):
row = predictions[i]
m = max(row)
for j in range(len(predictions[0])):
if row[j] == m:
newOut[i][j] = 1
else:
newOut[i][j] = 0
return newOut
# Setting epsilon for re-encoding multiple type predictions
epsilon = 0.03
# Function to re-encode output of Neural Network into multiple-hot encoding
def reEncodeMulti(predictions):
newOut = np.ndarray((len(predictions), len(predictions[0])))
for i in range(len(predictions)):
row = predictions[i]
m = max(row)
rowAlt = [e for e in row if e != m]
tx = max(rowAlt)
rowAltB = [e for e in rowAlt if e != tx]
tb = max(rowAltB)
for j in range(len(predictions[0])):
if row[j] == m:
newOut[i][j] = 1
elif row[j] == tx:
if (tx - tb) >= epsilon:
newOut[i][j] = 1
else:
newOut[i][j] = 0
return newOut
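# Illustrative sketch (not part of the original script): how reEncode() and
# reEncodeMulti() behave on a tiny made-up softmax output. Three columns are used
# only for readability; the real predictions have 18 (one per Pokemon type).
_demo_pred = np.array([[0.70, 0.20, 0.10],
[0.40, 0.38, 0.22]])
print(reEncode(_demo_pred))       # keeps only the argmax: both rows become [1, 0, 0]
print(reEncodeMulti(_demo_pred))  # also keeps the runner-up when its lead over the
# third-best value is >= epsilon: both rows become [1, 1, 0]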
# ###############################################################
# # Section 3 - Multi-class classification for Type 1 of Pokemon
# ###############################################################
# Neural Network with Softmax + Categorical Crossentropy
def test_network(Xtrain, Xtest, Ytrain, Ytest):
model = Sequential()
feat = len(Xtrain[0])
# Hidden Layers
model.add(Dense(64, activation='relu', input_dim=feat))
# model.add(Dense(64, activation='relu'))
# Output layer with 18 nodes using Softmax activation (we have 18 Pokemon types)
model.add(Dense(18, activation='softmax'))
# Running the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(Xtrain, Ytrain, epochs=40, batch_size=32)
# Accuracy Metrics and Predictions
score = model.evaluate(Xtest, Ytest, batch_size=16)
predictions = model.predict(Xtest)
return predictions, score
# # Decision Tree - (Deprecated)
# def test_tree(Xtrain, Xtest, Ytrain, Ytest):
# # Setting tree parameters
# classifier = DecisionTreeClassifier(criterion='entropy', max_depth=10, random_state=12345)
# classifier.fit(Xtrain, Ytrain)
# # Accuracy Metrics and Predictions
# print('Accuracy Score for Decision Tree on training set: {:.2f}'.format(classifier.score(Xtrain, Ytrain)))
# print('Accuracy Score for Decision Tree on test set: {:.2f}'.format(classifier.score(Xtest, Ytest)))
# predictions = classifier.predict(Xtest)
# return predictions
# K-Nearest Neighbours for Multi-Class classification
def test_knn(Xtrain, Xtest, Ytrain, Ytest):
# Setting k = 3
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(Xtrain, Ytrain)
# Accuracy Metrics and Predictions
predictions = classifier.predict(Xtest)
score = classifier.score(Xtest, Ytest)
return predictions, score
# ######################################################################
# # Section 4 - Multi-class, Multi-label approach to Type classification
# ######################################################################
# Neural Network with Softmax + Binary Crossentropy
def test_network2(Xtrain, Xtest, Ytrain, Ytest):
model = Sequential()
feat = len(Xtrain[0])
# Hidden Layers
model.add(Dense(64, activation='relu', input_dim=feat))
# model.add(Dense(64, activation='relu'))
# Output layer with 18 nodes using Softmax activation (we have 18 Pokemon types)
model.add(Dense(18, activation='softmax'))
# Running the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(Xtrain, Ytrain, epochs=40, batch_size=32)
# Accuracy Metrics and Predictions
score = model.evaluate(Xtest, Ytest, batch_size=16)
predictions = model.predict(Xtest)
return predictions, score
# Multilabel k Nearest Neighbours (MLkNN)
def test_mlknn(Xtrain, Xtest, Ytrain, Ytest):
# Training the classfier and making predictions
classifier = MLkNN(k=1)
classifier.fit(Xtrain, Ytrain)
predictions = classifier.predict(Xtest)
# Measuring accuracy
scores = classifier.score(Xtest, Ytest)
loss = metrics.hamming_loss(Ytest, predictions)
return predictions, scores, loss
# Binary Relevance with Logistic Regression
def test_logistic(Xtrain, Xtest, Ytrain, Ytest):
# Setting parameters for Logistic Regression
reg = LogisticRegression(C = 1.0, solver='lbfgs', random_state=12345)
# Initialising the Binary Relevance Pipeline
classifier = BinaryRelevance(classifier=reg)
# Training the classfiers and making predictions
classifier.fit(Xtrain, Ytrain)
predictions = classifier.predict(Xtest)
# Measuring accuracy
scores = classifier.score(Xtest, Ytest)
loss = metrics.hamming_loss(Ytest, predictions)
return predictions, scores, loss
###############################################################
# Section 5 - Getting results from models
###############################################################
typeList = ['Normal', 'Fighting', 'Flying', 'Poison', 'Ground', 'Rock', 'Bug', 'Ghost',
'Steel', 'Fire', 'Water', 'Grass', 'Electric', 'Psychic', 'Ice', 'Dragon', 'Dark', 'Fairy']
pokemon = pd.read_csv('D:/UIP/testList.csv', header=0)['Name']
#### Section 5.1 - Predicting a Pokemon's primary type. First with bio + move data, then only biodata. ####
# Neural Network
primaryNet_predic, primaryNet_acc = test_network(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
pd.DataFrame(reEncode(primaryNet_predic), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsPrim.csv')
primaryNet_predicBio, primaryNet_accBio = test_network(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
pd.DataFrame(reEncode(primaryNet_predicBio), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsPrimWithoutMoves.csv')
# # Decision Tree
# primaryForest_predic = test_tree(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
# primaryForest_predicBio = test_tree(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
# K Nearest Neighbours
primaryKNN_predic, primaryKNN_acc = test_knn(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
|
pd.DataFrame(primaryKNN_predic, index=pokemon, columns=typeList)
|
pandas.DataFrame
|
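# Illustrative sketch (standalone, not part of the original script): Hamming loss,
# reported by test_mlknn() and test_logistic() above, is simply the fraction of
# individual labels predicted wrongly across the whole label matrix.
import numpy as np
from sklearn.metrics import hamming_loss
_y_true = np.array([[1, 0, 1],
[0, 1, 0]])
_y_pred = np.array([[1, 0, 0],
[0, 1, 0]])
print(hamming_loss(_y_true, _y_pred))  # 1 wrong label out of 6 -> 0.1666...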
from typing import Tuple
import numpy as np
import pandas as pd
from HW4.decisionstump import DecisionStump
class Adaboost:
def __init__(self):
self.T = 0
self.h = []
self.alpha = pd.Series([])
self.w = pd.DataFrame([])
def train(self, X_train: pd.DataFrame, y_train: pd.Series,
n_iter: int = 10):
# Initialize parameters
N, D = X_train.shape
self.T = n_iter
self.h = []
self.alpha = []
self.w = []
w_t = pd.Series(np.full(N, 1/N), index=y_train.index, name=f"iter 0")
# Boosting
for t in range(self.T):
h_t = DecisionStump()
# Compute the weighted training error of h_t
err_t = h_t.train(X_train, y_train, w_t)
# Compute the importance of h_t
alpha_t = 0.5 * np.log((1 - err_t) / err_t)
# Update the weights
h_t_pred = h_t.predict(X_train)
w_t = w_t * np.exp(-alpha_t * y_train * h_t_pred)
w_t = w_t / w_t.sum()
w_t =
|
pd.Series(w_t, index=y_train.index, name=f"iter {t+1}")
|
pandas.Series
|
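# Illustrative numeric sketch (not part of the HW4 code) of the weight update inside
# Adaboost.train() above: alpha_t = 0.5*ln((1-err)/err), weights are scaled by
# exp(-alpha_t * y * h(x)) and renormalised. Labels are assumed to be in {-1, +1}.
import numpy as np
import pandas as pd
w = pd.Series([0.25, 0.25, 0.25, 0.25])  # uniform start, N = 4
y = pd.Series([+1, +1, -1, -1])          # true labels
h = pd.Series([+1, -1, -1, -1])          # stump predictions (one mistake, at index 1)
err = w[y != h].sum()                    # weighted error = 0.25
alpha = 0.5 * np.log((1 - err) / err)    # ~0.5493
w = w * np.exp(-alpha * y * h)           # the misclassified point gets a larger weight
w = w / w.sum()                          # renormalise; w[1] -> 0.5, the others -> 1/6
print(err, alpha, w.tolist())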
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
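# Illustrative sketch (not part of the pandas test suite): SQL_STRINGS is keyed first
# by statement name and then by flavor. The helper below only shows the lookup
# pattern against an in-memory sqlite3 connection; the real tests go through
# _get_exec() on the class-level connection instead.
def _example_create_iris_sqlite():
    conn = sqlite3.connect(":memory:")
    conn.execute(SQL_STRINGS["create_iris"]["sqlite"])
    conn.execute(SQL_STRINGS["insert_iris"]["sqlite"], (5.1, 3.5, 1.4, 0.2, "Iris-setosa"))
    return conn.execute("SELECT count(*) FROM iris").fetchone()[0]  # -> 1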
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# result index will gain a name from the set_index operation; the expected
# index is unnamed, so compare with check_names=False
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schema's not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schema's
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
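# psql_insert_copy receives the SQLAlchemy table wrapper, a connection, the
# column names and an iterator of row tuples; the rows are serialized to an
# in-memory CSV buffer and streamed to PostgreSQL via COPY ... FROM STDIN,
# which is typically much faster than row-wise INSERTs.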
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
expected.to_sql(
"test_copy_insert", self.conn, index=False, method=psql_insert_copy
)
result = sql.read_sql_table("test_copy_insert", self.conn)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlite3.connect(":memory:")
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
# GH 29921
self._to_sql(method="multi")
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
assert self.pandasSQL.has_table("drop_test_frame")
self.pandasSQL.drop_table("drop_test_frame")
assert not self.pandasSQL.has_table("drop_test_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == "mysql":
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(f"Table {table}, column {column} not found")
def test_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": bool})
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype="STRING")
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
def test_notna_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP"
assert self._get_sqlite_column_type(tbl, "Int") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Float") == "REAL"
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
msg = "Empty table or column name specified"
with pytest.raises(ValueError, match=msg):
df.to_sql("", self.conn)
for ndx, weird_name in enumerate(
[
"test_weird_name]",
"test_weird_name[",
"test_weird_name`",
'test_weird_name"',
"test_weird_name'",
"_b.test_weird_name_01-30",
'"_b.test_weird_name_01-30"',
"99beginswithnumber",
"12345",
"\xe9",
]
):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
c_tbl = f"test_weird_col_name{ndx:d}"
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
def date_format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime("%Y%m%d")
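# For illustration, with the formatter above:
#   date_format(datetime(2014, 1, 2)) -> "20140102"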
_formatters = {
datetime: "'{}'".format,
str: "'{}'".format,
np.str_: "'{}'".format,
bytes: "'{}'".format,
float: "{:.8f}".format,
int: "{:d}".format,
type(None): lambda x: "NULL",
np.float64: "{:.10f}".format,
bool: "'{!s}'".format,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isna(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
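# Illustrative sketch of how format_query applies the _formatters table above
# (the statement and values are made up for the example):
#   format_query("INSERT INTO test VALUES (%s, %s, %s)", 1, 2.5, "foo")
#   -> "INSERT INTO test VALUES (1, 2.50000000, 'foo')"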
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
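# Illustrative usage (the result shape depends on the query; e.g. a COUNT
# query over a three-row table would come back as [(3,)]):
#   tquery("SELECT count(*) FROM test", con=self.conn)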
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
self.method = request.function
self.conn = sqlite3.connect(":memory:")
# In some test cases we may close db connection
# Re-open conn here so we can perform cleanup in teardown
yield
self.method = request.function
self.conn = sqlite3.connect(":memory:")
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.iloc[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(" ")
if len(tokens) == 2 and tokens[0] == "A":
assert tokens[1] == "DATETIME"
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
lines = create_sql.splitlines()
assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name="test_table", con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame["txt"] = ["a"] * len(frame)
frame2 = frame.copy()
new_idx = Index(np.arange(len(frame2))) + 10
frame2["Idx"] = new_idx.copy()
sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
expected = frame.copy()
expected.index = new_idx
expected.index.name = "Idx"
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({"From": np.ones(5)})
sql.to_sql(df, con=self.conn, name="testkeywords", index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=["c0"])
sql.to_sql(mono_df, con=self.conn, name="mono_df", index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df"))
# it should not fail, and gives 3 ( Issue #3628 )
assert the_sum == 3
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
table_name = "table_if_exists"
sql_select = f"SELECT * FROM {table_name}"
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
msg = "'notvalidvalue' is not valid for if_exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="notvalidvalue",
)
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
msg = "Table 'table_if_exists' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
# test if_exists='replace'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(3, "C"), (4, "D"), (5, "E")]
clean_up(table_name)
# test if_exists='append'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="fail",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="append",
index=False,
)
assert tquery(sql_select, con=self.conn) == [
(1, "A"),
(2, "B"),
(3, "C"),
(4, "D"),
(5, "E"),
]
clean_up(table_name)
@pytest.mark.single
@pytest.mark.db
@pytest.mark.skip(
reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed"
)
class TestXMySQL(MySQLMixIn):
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
except pymysql.Error:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
except pymysql.Error:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
self.method = request.function
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_chunksize_read_type(self):
frame = tm.makeTimeDataFrame()
frame.index.name = "index"
drop_sql = "DROP TABLE IF EXISTS test"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.to_sql(frame, name="test", con=self.conn)
query = "select * from test"
chunksize = 5
chunk_gen = pd.read_sql_query(
sql=query, con=self.conn, chunksize=chunksize, index_col="index"
)
chunk_df = next(chunk_gen)
tm.assert_frame_equal(frame[:chunksize], chunk_df)
def test_execute(self):
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.iloc[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(" ")
if len(tokens) == 2 and tokens[0] == "A":
assert tokens[1] == "DATETIME"
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
lines = create_sql.splitlines()
assert "PRIMARY KEY (`A`, `B`)" in create_sql
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self, request, datapath):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(request, datapath)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame, name="test_table", con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame["txt"] = ["a"] * len(frame)
frame2 = frame.copy()
index = Index(np.arange(len(frame2))) + 10
frame2["Idx"] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({"From": np.ones(5)})
sql.to_sql(
df, con=self.conn, name="testkeywords", if_exists="replace", index=False
)
def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
import pandas as pd
import numpy as np
import os
def parse_uniProt_map(uniProtMap):
df = pd.read_csv(uniProtMap, sep='\t')
df.dropna(inplace=True)
uniProtMapping = dict(zip(list(df['Entry']), list(df['Gene names (primary )'])))
return uniProtMapping
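# Usage sketch (illustrative; assumes the UniProt tab file contains the human
# p53 entry P04637 mapped to gene TP53):
#   mapping = parse_uniProt_map("./data/UniProt/uniprot-taxonomy_9606.tab")
#   mapping["P04637"]  # -> "TP53"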
def parse_HuRI(ppiFile='./data/atlas/HuRI.psi', uniProtMap="./data/UniProt/uniprot-taxonomy_9606.tab",
wFile_PPI='./data/parsed/HuRI_PPI.pkl', root='./'):
ppiFile, uniProtMap, wFile_PPI = root+ppiFile, root+uniProtMap, root+wFile_PPI
if os.path.exists(wFile_PPI): return pd.read_pickle(wFile_PPI)
uniProtMapping = parse_uniProt_map(uniProtMap)
# only direct interaction & physical association & association are PPIs
ppi_df = pd.read_csv(ppiFile, sep='\t', header=None)
inv_ppis = np.transpose(np.asarray([ppi_df[0], ppi_df[1]]))
ppis = []
for i in inv_ppis:
if i[0] == "-" or i[1] == "-": continue
ppi_h = "-".join(i[0].split(":")[1].split("-")[:-1]) if len(i[0].split(":")[1].split("-")) > 1 else i[0].split(":")[1].split("-")[0]
ppi_t = "-".join(i[1].split(":")[1].split("-")[:-1]) if len(i[1].split(":")[1].split("-")) > 1 else i[1].split(":")[1].split("-")[0]
ppis.append([ppi_h, ppi_t])
mappedPPIs = []
for ppi in ppis:
if ppi[0] not in uniProtMapping or ppi[1] not in uniProtMapping: continue
mappedPPIs.append([uniProtMapping[ppi[0]], uniProtMapping[ppi[1]]])
mappedPPIs = np.transpose(np.asarray(mappedPPIs))
ppi_df = pd.DataFrame({'nodeA': mappedPPIs[0], 'nodeB': mappedPPIs[1]})
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from GUI import Ui_MainWindow # generated GUI py file
import sys
import os
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from datetime import datetime
import ctypes
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pwlf
from GPyOpt.methods import BayesianOptimization
import openpyxl
import math
from scipy import stats
# python included dependencies: datetime, ctypes, math, os, sys
# installed package dependencies: dateutil, GPyOpt, matplotlib, numpy, openpyxl (and image), pandas (and xlsxwriter), pwlf, pyqt, scipy
# class to populate a PyQT table view with a pandas dataframe
class PandasModel(QtCore.QAbstractTableModel):
def __init__(self, data, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
def rowCount(self, parent=None):
return self._data.shape[0]
def columnCount(self, parent=None):
return self._data.shape[1]
def data(self, index, role=QtCore.Qt.DisplayRole):
if index.isValid():
if role == QtCore.Qt.DisplayRole:
return str(self._data.iloc[index.row(), index.column()])
return None
def headerData(self, col, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self._data.columns[col]
return None
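# Usage sketch (illustrative; `table_view` is a hypothetical QTableView that
# is not defined in this file):
#   model = PandasModel(pd.DataFrame({"a": [1, 2], "b": [3, 4]}))
#   table_view.setModel(model)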
# class to handle threading of datapoints so GUI is responsive
class DataPointsWorkThread(QThread):
signal = pyqtSignal('PyQt_PyObject')
signal_pb = pyqtSignal('PyQt_PyObject')
def __init__(self, data, start_date, end_date, pb_inc, option):
QThread.__init__(self)
# create instance of WorkerThread class and pass variables from application class as instance variables
self.data = data
self.start_date = start_date
self.end_date = end_date
self.pb_inc = pb_inc
self.option = option
def run(self):
# local variables from instance variables for reference convenience
data = self.data
start_date = self.start_date
end_date = self.end_date
pb_inc = self.pb_inc
option = self.option
# initialize datapoints data frame and progress bar
df = pd.DataFrame()
pb_update = 0
# group data into month intervals increasing each item by 1 day
for date in pd.date_range(start_date, end_date):
start_date_add_days = date + relativedelta(months=+1,days=-1)
paid_date = start_date_add_days + relativedelta(months=+2)
if option == "option2":
start_date_add_month = date + relativedelta(months=+1)
start_date_add_months = date + relativedelta(months=+2,days=-1)
paid_date_add_months = start_date_add_months + relativedelta(months=+2)
elif option == "option3":
start_date_add_month = date + relativedelta(months=+1)
start_date_add_months = date + relativedelta(months=+12,days=-1)
paid_date_add_months = start_date_add_months + relativedelta(months=+2)
col1 = date
col2 = start_date_add_days
# sum payment data following criteria of allowing for additional 2 months of paid dates
if option == "option1":
col3 = round(data[(data.a >= date) & (data.a <= start_date_add_days) & (data.b <= paid_date)].sum()['c'],2)
elif option == "option2":
col3 = round(data[(data.a >= date) & (data.a <= start_date_add_days) & (data.b <= paid_date)].sum()['c'],2)
elif option == "option3":
col3 = round(data[(data.a >= date) & (data.a <= start_date_add_days) & (data.b <= paid_date)].sum()['c'],2) * 12
if option == "option1":
df = df.append({'A' : col1 , 'B' : col2, 'C' : col3},ignore_index=True)
else:
col4 = round(data[(data.a >= start_date_add_month) & (data.a <= start_date_add_months) & (data.b <= paid_date_add_months)].sum()['c'],2)
col5 = col3 - col4
df = df.append({'A' : col1 , 'B' : col2, 'C' : col3, 'D' : col4, 'E' : col5},ignore_index=True)
# update progress
pb_update = pb_update + (100/pb_inc)
self.signal_pb.emit(pb_update)
# find index of maximum and corresponding date
# (note: isChecked() already returns a bool, so comparing it to True elsewhere would be redundant)
if option == "option1":
index_max_C = df[(df.C == df.max()['C'])].index.tolist()
else:
index_max_C = df[(df.E == df.max()['E'])].index.tolist()
list_date_max_C = df['A'].iloc[index_max_C].tolist()
list_dollars_max_C = df['C'].iloc[index_max_C].tolist()
# clear dataframe
df = pd.DataFrame()
# reconstruct dataframe respective to maximum date
date_max_C = list_date_max_C[0]
dollars_max_C = list_dollars_max_C[0]
DOI = (date_max_C + relativedelta(months=+1)).strftime("%#m/%#d/%Y")
DOI_dollars = '${:,.2f}'.format(dollars_max_C)
date_max_C_end = date_max_C + relativedelta(months=+3,days=-1)
for its in range(36):
df = df.append({'A': str(date_max_C).replace('00:00:00',''), 'B': str(date_max_C + relativedelta(months=+1,days=-1)).replace('00:00:00',''), 'C': round(data[(data.a >= date_max_C) & (data.a <= date_max_C + relativedelta(months=+1,days=-1)) & (data.b <= date_max_C_end)].sum()['c'],2)},ignore_index=True)
# update progress
pb_update = pb_update + (100/pb_inc)
self.signal_pb.emit(pb_update)
date_max_C = date_max_C + relativedelta(months=-1)
# sort new dataframe and reset index
df = df.sort_values(by="A")
df = df.reset_index(drop=True)
# define x and y outputs
x = np.arange(1,37)
y = np.array(df['C'].tolist())
# overwrite A and B with formatted date strings (assignment replaces the columns in place)
A_format, B_format = [], []
a = np.array(df['A'].tolist())
b = np.array(df['B'].tolist())
for each in a:
A_format.append(pd.to_datetime(each).strftime("%#m/%#d/%Y"))
df['A'] = A_format
for each in b:
B_format.append(pd.to_datetime(each).strftime("%#m/%#d/%Y"))
df['B'] = B_format
# replace C with currency-formatted values
C_format = []
for each in y:
C_format.append('${:,.2f}'.format(each))
df['C'] = C_format
# create a dictionary of variables to pass to display
dp_output = {
"df":df,
"x":x,
"y":y,
"DOI":DOI,
"DOI_dollars":DOI_dollars
}
# emitting a pyqtSignal named display_output with output dictionary data
self.signal.emit(dp_output)
# class to handle threading of regression so GUI is responsive
class RegressionWorkThread(QThread):
signal = pyqtSignal('PyQt_PyObject')
def __init__(self, x, y, df, max_segments, max_iter, isnt_discretized):
QThread.__init__(self)
# create instance of WorkerThread class and pass variables from application class as instance variables
self.x = x
self.y = y
self.df = df
self.max_segments = max_segments
self.max_iter = max_iter
self.isnt_discretized = isnt_discretized
def run(self):
# local variables from instance variables for reference convenience
x = self.x
y = self.y
df = self.df
max_segments = self.max_segments
max_iter = self.max_iter
isnt_discretized = self.isnt_discretized
# reduce df if user has already populated df, selected new option, and ran again
if len(df.columns) > 3:
df = df[['Date Range Start','Date Range End','Sum']]
def my_obj(x):
l = y.mean()*0.001 # penalty parameter
f = np.zeros(x.shape[0])
for i, j in enumerate(x):
my_pwlf.fit(j[0])
f[i] = my_pwlf.ssr + (l*j[0])
return f
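# my_obj scores each candidate number of segments proposed by the optimizer:
# it fits a piecewise model and returns the sum of squared residuals plus a
# penalty of l per segment (0.1% of the mean response), so additional
# breakpoints must reduce the residual error enough to justify themselves.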
# initialize piecewise linear fit with your x and y data
my_pwlf = pwlf.PiecewiseLinFit(x, y)
# define the lower and upper bound for the number of line segements
bounds = [{'name': 'var_1', 'type': 'discrete', 'domain': np.arange(2, max_segments + 1)}]
np.random.seed(12121)
myBopt = BayesianOptimization(my_obj, domain=bounds, model_type='GP',
initial_design_numdata=10,
initial_design_type='latin',
exact_feval=True, verbosity=False,
verbosity_model=False)
# perform the bayesian optimization to find the optimum number of line segments
myBopt.run_optimization(max_iter=max_iter, verbosity=False)
# perform the fit for the optimum
my_pwlf.fit(myBopt.x_opt)
# generate regression model and prepare variables and stats for df
if isnt_discretized: # time recode is continuous without discretization, all explanatory variables vary
# predict for the determined points
xHat = np.linspace(min(x), max(x), num=3501) # stretch linespace so segments are not jagged
yHat = my_pwlf.predict(xHat)
# calculate n
n = len(x)
# get model parameters
beta = my_pwlf.beta
# calculate k
k = len(beta)
# calculate the standard errors associated with each beta parameter
se = my_pwlf.standard_errors()
# calculate t-value
t = beta / se
# calculate the p-values
pvalues = my_pwlf.p_values()
# calculate r-squared, multiple r, and r-squared adjusted
# because k includes y-intercept: n-(k+1) => (n-k) for r_sq_adj, mse, and dof , (k) => (k-1) for dof and msr
r_sq = my_pwlf.r_squared()
r_mult = math.sqrt(r_sq)
r_sq_adj = 1 - ((n - 1) / (n - k) * (1 - r_sq))
# calculate sums of squares, means of squares, and standard error
fit_breaks = my_pwlf.fit_breaks
ybar = np.ones(my_pwlf.n_data) * np.mean(my_pwlf.y_data)
ydiff = my_pwlf.y_data - ybar
sst = np.dot(ydiff, ydiff)
sse = my_pwlf.fit_with_breaks(fit_breaks)
ssr = (sst - sse)
mse = sse / (n - k)
msr = ssr / (k - 1)
S = math.sqrt(mse)
# calculate F-statistic
Fstat = (msr / mse)
# calculate degrees of freedom (regression, residual/errors, and total)
dof = [(k - 1),(n - k),(n - 1)]
# populate yHats array unique to pwlf
yHat_values, yHat_index = [], 0
for yHats in range(1,37):
yHat_values.append("${:,.2f}".format(yHat[yHat_index]))
yHat_index += 100
# construct independent variables dataframe
# construct the regression matrix
A = np.zeros((n, my_pwlf.n_parameters))
A[:, 0] = 1.0
A[:, 1] = x - my_pwlf.fit_breaks[0]
for i in range(my_pwlf.n_segments-1):
int_locations = x > my_pwlf.fit_breaks[i+1]
if sum(int_locations) > 0:
int_index = np.argmax(int_locations)
A[int_index:, i+2] = x[int_index:] - my_pwlf.fit_breaks[i+1]
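# Beyond the intercept, each column of A is a hinge term of the form
# max(0, x - breakpoint): zero before its breakpoint and growing linearly
# after it, which lets the fitted line change slope at each breakpoint.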
# transform regression matrix to a dataframe with structure columns = yint, x1, x2, ..., xn
B = list(map(list,zip(*A)))
# construct independent variables dataframe
df_variables = pd.DataFrame()
for arrays in range(len(A[0])):
df_variables.insert(loc=arrays,column="col:"+str(arrays),value=B[arrays])
# drop y-intercept column
df_variables.drop(df_variables.columns[0], axis=1, inplace=True)
else: # time recode is continuous with discretization, one explanatory variables varies while others held constant
# discretize breakpoints
breaks = my_pwlf.fit(myBopt.x_opt)
breaks_int = []
for breakpoint in breaks:
breaks_int.append(round(breakpoint,0))
# construct regression matrix
result = []
template = [0] * ( len(breaks_int) - 1 )  # create a 0-initialized array of the length of the number of segments
cursorPosition = 0
cursorValue = 1
cursorMax = breaks_int[cursorPosition+1]
for row in range( int(breaks_int[-1]) ):
thisrow = template.copy()
# change the value for this row
thisrow[cursorPosition] = cursorValue
result.append(thisrow)
# refer to the result to build on next time
template = thisrow
# move the cursor and reset its values
if (cursorValue >= cursorMax):
cursorPosition += 1
if cursorPosition >= (len(breaks_int) - 1):
break
cursorValue = 1
cursorMax = breaks_int[cursorPosition+1] - breaks_int[cursorPosition]
else:
cursorValue += 1
# transpose A so the form is correct
result = list(map(list, zip(*result)))
# add intercept row
result.append([1]*len(result[0]))
# transpose to regression matrix form
A = (np.array(result)).T
# calculate beta and sse
# note: y-intercept is last value in beta
beta, sse, rank, s = np.linalg.lstsq(A, y, rcond=None)
# predict for the determined points
xHat = np.linspace(min(x), max(x), num=36, endpoint=True)
yHat = np.dot(A,beta)
# calculate n
n = len(x)
# calculate k
k = len(beta)
# calculate residuals
e = yHat - y
# calculate variance
variance = np.dot(e, e) / (n - k)
# calculate se
se = np.sqrt(variance * (np.linalg.inv(np.dot(A.T,A)).diagonal()))
# calculate t-value
t = beta / se
# calculate p-value
pvalues = 2.0 * stats.t.sf(np.abs(t), df=n-k-1)
# calculate sums of squares, means of squares, and standard error
# because k includes y-intercept: n-(k+1) => (n-k) for r_sq_adj, mse, and dof , (k) => (k-1) for dof and msr
ybar = np.ones(n) * np.mean(y)
ydiff = y - ybar
sst = np.dot(ydiff, ydiff)
sse = sse[0]
ssr = (sst - sse)
mse = sse / (n - k)
msr = ssr / (k - 1)
S = math.sqrt(mse)
# calculate F-statistic
Fstat = (msr / mse)
# calculate degrees of freedom (regression, residual/errors, and total)
dof = [(k - 1),(n - k),(n - 1)]
# calculate r-squared, multiple r, and r-squared adjusted
r_sq = 1.0 - (sse / sst)
r_mult = math.sqrt(r_sq)
r_sq_adj = 1 - ((n - 1) / (n - k) * (1 - r_sq))
# construct independent variables dataframe
df_variables = pd.DataFrame()
loc = 0
colnum = 0
for arrays in result:
col = "col" + str(colnum)
df_variables.insert(loc=loc,column=col,value=arrays)
loc += 1
colnum += 1
# drop y-intercept column
df_variables.drop(df_variables.columns[-1], axis=1, inplace=True)
# populate yHats array unique to this option
yHat_values = []
for yHats in yHat:
yHat_values.append("${:,.2f}".format(yHats))
# in discrete calcs y-intercept parameters are listed last as opposed to first in pwlf, so we need to reorder
new_beta, new_se, new_t, new_pvalues = [], [], [], []
new_beta.append(beta[-1])
new_se.append(se[-1])
new_t.append(t[-1])
new_pvalues.append(pvalues[-1])
for i in range(0,k-1):
new_beta.append(beta[i])
new_se.append(se[i])
new_t.append(t[i])
new_pvalues.append(pvalues[i])
beta = new_beta
se = new_se
t = new_t
pvalues = new_pvalues
# complete dataframes
# insert yHats into df
df.insert(loc=3,column='col4',value=yHat_values)
# insert ind variables into df
loc = 4
for columns in df_variables:
df.insert(loc=loc,column='x-'+str(loc-3),value=df_variables[columns])
loc += 1
# build summary statistics dataframe
regres_stats_labels = ["Multiple R","R Square","Adjusted R Square","Standard Error","Observations"]
regress_stats = ["{:0.2f}".format(r_mult),"{:0.2f}".format(r_sq),"{:0.2f}".format(r_sq_adj),"{:0.2f}".format(S),n]
df_regress_stats = pd.DataFrame({"Regression":regres_stats_labels,"Statistics":regress_stats})
# build ANOVA dataframe
SS = ["{:0.2f}".format(ssr),"{:0.2f}".format(sse),"{:0.2f}".format(sst)]
MS = ["{:0.2f}".format(msr),"{:0.2f}".format(mse),'']
F = ["{:0.2f}".format(Fstat),'','']
anova_labels = ['Regression','Residual','Total']
df_anova = pd.DataFrame({'':anova_labels,'df':dof,'SS':SS,'MS':MS,'F':F})
# build coefficients dataframe
df_coef_labels = []
df_coef_labels.append('Y-Intercept')
for i in range(1,k):
df_coef_labels.append('x-'+str(i))
beta_format, se_format, t_format, pvalues_format = [], [], [], []
for i in range(k):
roundbeta, roundse, roundt, roundp = "{:0.2f}".format(beta[i]), "{:0.2f}".format(se[i]), "{:0.2f}".format(t[i]), "{:0.2f}".format(pvalues[i])
beta_format.append(roundbeta)
se_format.append(roundse)
t_format.append(roundt)
pvalues_format.append(roundp)
df_coef = pd.DataFrame({'':df_coef_labels,"Coefficients":beta_format,"Standard Error":se_format,"t Stat":t_format,"P-value":pvalues_format})
        # plot the results and save as a temporary file to be overwritten each iteration
plt.figure()
plt.plot(x, y, '-')
plt.plot(xHat, yHat, 'r--')
        # provide number of segments from model
num_segments = str(myBopt.x_opt).replace("[","").replace(".]","")
# provide function value from model
func_value = "{:0.2f}".format(myBopt.fx_opt)
# create a dictionary of variables to pass to display
regression_output = {
"num_segments":num_segments,
"func_value":func_value,
"df_regress_stats":df_regress_stats,
"df_anova":df_anova,
"df_coef":df_coef,
"x":x,
"y":y,
"xHat":xHat,
"yHat":yHat,
"df":df,
"plt":plt,
}
# emitting a pyqtSignal with output dictionary data
self.signal.emit(regression_output)
# main window class
class DPR(QtWidgets.QMainWindow):
def __init__(self, parent=None):
# call the parent class's constructor
QtWidgets.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.pushButton_1.clicked.connect(self.select_file)
self.ui.pushButton_2.clicked.connect(self.run_datapoints)
self.ui.dateEdit_1.setDateTime(QtCore.QDateTime.currentDateTime())
self.ui.dateEdit_2.setDateTime(QtCore.QDateTime.currentDateTime())
self.ui.dateEdit_1.dateChanged.connect(self.update_date)
self.ui.radioButton_1.setChecked(True)
self.ui.radioButton_4.setChecked(True)
self.ui.pushButton_3.clicked.connect(self.run_regression)
self.ui.lineEdit_3.setText("5")
self.ui.lineEdit_4.setText("10")
self.ui.pushButton_4.clicked.connect(self.write_excel)
self.ui.graphicsView_1.hide()
self.ui.graphicsView_2.hide()
self.MessageBox = ctypes.windll.user32.MessageBoxW
# after first date edit is changed, update second date edit to be a year later
def update_date(self):
get_date = self.ui.dateEdit_1.date().toString("yyyy-M-d")
new_datetime = pd.to_datetime(get_date) + relativedelta(months=+12)
change_datetime = QtCore.QDateTime.fromString(str(new_datetime), "yyyy-M-d hh:mm:ss")
self.ui.dateEdit_2.setDateTime(change_datetime)
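        # e.g. setting the first date to 2023-1-15 moves the second date to 2024-1-15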
# check if file is selected
def select_file(self):
filename, _ = QtWidgets.QFileDialog.getOpenFileName(None, "Select File", "","Text Files (*.txt)")
if filename:
# outputs
self.ui.lineEdit_1.setText(filename)
self.filename = filename
def run_datapoints(self):
delimiter = str(self.ui.comboBox_1.currentText())
has_headers = self.ui.checkBox_1.isChecked()
if self.ui.lineEdit_1.text() == "":
self.MessageBox(None, "No file selected.", "File Error", 0)
return
try:
data = self.prepare_data(delimiter, has_headers)
except pd.errors.EmptyDataError:
self.MessageBox(None, "No data in file.", "Empty Data Error", 0)
return
        # prepare_data returns an int error code on failure, otherwise a DataFrame
        if isinstance(data, int) and data == 0:
            self.MessageBox(None, "Problem reading file. Check header declaration.", "Attribute Error", 0)
            return
        elif isinstance(data, int) and data == 1:
            self.MessageBox(None, "Column 1 should be date type.", "Attribute Error", 0)
            return
        elif isinstance(data, int) and data == 2:
            self.MessageBox(None, "Column 2 should be date type.", "Attribute Error", 0)
            return
        elif isinstance(data, int) and data == 3:
            self.MessageBox(None, "Column 3 should be currency.", "Attribute Error", 0)
            return
# disable calculate button
self.ui.pushButton_2.setEnabled(False)
start_date = pd.to_datetime(self.ui.dateEdit_1.date().toString("M/d/yyyy"))
end_date = pd.to_datetime(self.ui.dateEdit_2.date().toString("M/d/yyyy"))
pb_inc = (end_date - start_date).days + 36 #number of items in the 2 loops in datapoints fxs
if self.ui.radioButton_1.isChecked():
option = "option1"
elif self.ui.radioButton_2.isChecked():
option = "option2"
else: option = "option3"
self.worker_thread = DataPointsWorkThread(data, start_date, end_date, pb_inc, option)
self.worker_thread.signal.connect(self.display_datapoints)
self.worker_thread.signal_pb.connect(self.update_progressbar)
self.worker_thread.start()
def update_progressbar(self, pb_update):
self.ui.progressBar_1.setValue(pb_update)
# construct raw data dataframe from file data
def prepare_data(self, delimiter, has_headers):
if delimiter == "Tab Delimited":
sep = "\t"
elif delimiter == 'Comma Delimited':
sep = ","
elif delimiter == 'Pipe Delimited':
sep = "|"
if has_headers: # data file has headers
try:
data = pd.read_csv(self.filename, skiprows=1, sep=sep, header=None)
except AttributeError:
return 0
else: # data file does not have headers
try:
data = pd.read_csv(self.filename, sep=sep, header=None)
except AttributeError:
return 0
data.columns = ["a", "b", "c"]
try:
data['a'] =
|
pd.to_datetime(data['a'])
|
pandas.to_datetime
|
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, copy
import warnings
import pytest
import numpy as np
import pandas as pd
import numpy.testing as testing
import pylife.strength.meanstress as MST
from pylife.strength.sn_curve import FiniteLifeCurve
def goodman_signal_sm():
Sm = np.array([-4., -2., -1., 0., 0.4, 2./3., 7./6.])
Sa = np.array([ 2., 2., 3./2., 1., 0.8, 2./3., 7./12.])
return pd.DataFrame({'sigma_m': Sm, 'sigma_a': Sa })
def goodman_signal_r():
Sm = np.array([-4., -2., -1., 0., 0.4, 2./3., 7./6.])
Sa = np.array([ 2., 2., 3./2., 1., 0.8, 2./3., 7./12.])
warnings.simplefilter('ignore', RuntimeWarning)
R = (Sm-Sa)/(Sm+Sa)
warnings.simplefilter('default', RuntimeWarning)
return pd.DataFrame({'sigma_a': Sa, 'R': R})
def five_segment_signal_sm():
Sm = np.array([-12./5., -2., -1., 0., 2./5., 2./3., 7./6., 1.+23./75., 2.+1./150., 3.+11./25., 3.+142./225.])
Sa = np.array([ 6./5., 2., 3./2., 1., 4./5., 2./3., 7./12., 14./25., 301./600., 86./225., 43./225.])
return pd.DataFrame({'sigma_m': Sm, 'sigma_a': Sa })
def five_segment_signal_r():
Sm = np.array([-12./5., -2., -1., 0., 2./5., 2./3., 7./6., 1.+23./75., 2.+1./150., 3.+11./25., 3.+142./225.])
Sa = np.array([ 6./5., 2., 3./2., 1., 4./5., 2./3., 7./12., 14./25., 301./600., 86./225., 43./225.])
warnings.simplefilter('ignore', RuntimeWarning)
R = (Sm-Sa)/(Sm+Sa)
warnings.simplefilter('default', RuntimeWarning)
return pd.DataFrame({'sigma_a': Sa, 'R': R })
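# Note on the *_r fixtures above: the stress ratio is R = (Sm - Sa) / (Sm + Sa), i.e.
# sigma_min / sigma_max. Using values from the fixtures, Sm = 0, Sa = 1 gives R = -1
# (fully reversed loading), while Sm = -2, Sa = 2 gives a zero denominator, which is
# why the RuntimeWarning from that division is silenced.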
def test_FKM_goodman_plain_sm():
cyclic_signal = goodman_signal_sm()
Sa = cyclic_signal.sigma_a.to_numpy()
Sm = cyclic_signal.sigma_m.to_numpy()
M = 0.5
R_goal = 1.
testing.assert_raises(ValueError, MST.FKM_goodman, Sa, Sm, M, M/3, R_goal)
R_goal = -1.
res = MST.FKM_goodman(Sa, Sm, M, M/3, R_goal)
np.testing.assert_array_almost_equal(res, np.ones_like(res))
Sm = np.array([5])
Sa = np.array([0])
res = MST.FKM_goodman(Sa, Sm, M, M/3, R_goal)
assert np.equal(res,0.)
def test_FKM_goodman_single_M_sm():
cyclic_signal = goodman_signal_sm()
M = 0.5
R_goal = -1.
res = cyclic_signal.meanstress_mesh.FKM_goodman(pd.Series({ 'M':M, 'M2':M/3 }), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
def test_FKM_goodman_single_M_R():
cyclic_signal = goodman_signal_r()
M = 0.5
R_goal = -1.
res = cyclic_signal.meanstress_mesh.FKM_goodman(
|
pd.Series({ 'M':M, 'M2':M/3 })
|
pandas.Series
|
# coding=utf-8
"""
Run feature engineering on the train and test data; the generated data is fed to make_tfrecords.py
@author: yuhaitao
"""
import pandas as pd
import os
import numpy as np
import gc
import pickle
import datetime
import logging
import sys
import json
import multiprocessing
from data_loader import myDataLoader
def make_features(data, params, out_dir, label_id, mode):
"""
    Feature engineering; save the results to a new data file
"""
with open('./feature_info.json', 'r') as f:
feature_infos = json.load(f)
feature_imps = pd.read_csv(
f'./data/feature_imps/feature_imps_{label_id}.csv', index_col=False)
    # drop the trailing 1500 features
if params['drop_1500']:
del_feats = list(feature_imps['feature'].values.astype(str)[-1500:])
data_new = data.drop(axis=1, columns=del_feats)
print(f'After drop 1500, data shape:{data_new.shape}')
imp_feats = list(feature_imps['feature'].values.astype(str)[:50])
wide_imp_feats = []
deep_imp_feats = []
for feat in imp_feats:
if data[feat].dtype == float:
deep_imp_feats.append(feat)
elif data[feat].dtype == int:
wide_imp_feats.append(feat)
else:
raise ValueError
    # cross the wide features
if params['wide_cross']:
str_df = pd.DataFrame()
for i in range(len(wide_imp_feats) - 1):
for j in range(i + 1, len(wide_imp_feats)):
i_name, j_name = wide_imp_feats[i], wide_imp_feats[j]
str_df['c_' + i_name + '_' + j_name] = data_new[i_name].astype(
str).values + '_' + data_new[j_name].astype(str).values
def get_cross(x, feature_infos):
i_name, j_name = x.name.split('_')[1], x.name.split('_')[2]
i_list, j_list = feature_infos[i_name]['list'], feature_infos[j_name]['list']
out = []
for one in x:
i, j = int(one.split('_')[0]), int(one.split('_')[1])
if i not in i_list or j not in j_list:
out.append(0)
else:
out.append(i_list.index(i) * len(j_list) +
j_list.index(j) + 1)
return out
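        # Small worked example of the encoding above (hypothetical value lists): if
        # i_list = [1, 2] and j_list = [3, 4, 5], the pair (i=2, j=4) is encoded as
        # i_list.index(2) * len(j_list) + j_list.index(4) + 1 = 1 * 3 + 1 + 1 = 5,
        # while any pair containing an unseen value falls back to 0.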
cross_df = str_df.apply(get_cross, args=(feature_infos,), axis=0)
data_new = pd.concat([data_new, cross_df], axis=1)
print(f'After wide cross, data shape:{data_new.shape}')
# data_new.to_csv(os.path.join(
# out_dir, f'{label_id}_feature_data_widecross_{mode}.csv'), index=False)
# print(f'feature data saved.')
    # bucketize the deep features
if params['bucket']:
def get_bucket(x, d_name, feature_infos):
d_min, d_max = feature_infos[d_name]['min'], feature_infos[d_name]['max']
if x[0] > d_max:
return 11
elif x[0] < d_min:
return 0
elif x[0] == d_max:
return 10
else:
return int(10 * (x[0] - d_min) / (d_max - d_min)) + 1
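        # Worked example of the bucketing above (hypothetical bounds): with d_min = 0 and
        # d_max = 10, a value of 3.5 lands in bucket int(10 * 0.35) + 1 = 4; values below
        # d_min map to 0, values above d_max to 11, and exactly d_max to 10, so there are
        # 12 possible buckets in total.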
bucket_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import matplotlib.pyplot as plt
from asreview.state.utils import open_state
from scipy.stats import spearmanr
def probability_matrix_from_h5_state(state_fp):
"""Get the probability matrix from an .h5 state file.
Arguments
----------
state_fp: str
Path to state file.
Returns
-------
pandas.DataFrame:
    A dataframe of shape (num_papers, num_queries), with entry (i,j) giving the probability
    that paper i was relevant according to the model at query j. Note that the row
    index starts at 0, but the column index starts at 1.
"""
proba_dict = {}
with open_state(state_fp, read_only=True) as state:
queries = [int(num) for num in state.f['results'].keys()]
total_queries = max(queries)
for i in range(1, total_queries+1):
proba_dict[i] = state.f[f'results/{i}/proba'][:]
proba_matrix =
|
pd.DataFrame.from_dict(proba_dict)
|
pandas.DataFrame.from_dict
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import glob
import subprocess
from libraries.lib_percentiles import *
from libraries.lib_gtap_to_final import gtap_to_final
from libraries.lib_common_plotting_functions import greys, quint_colors, quint_labels
from libraries.lib_country_params import get_FD_scale_fac,iso_to_name
from libraries.lib_get_hh_survey import get_hh_survey#, get_miembros_hogar
from libraries.lib_survey_categories import get_dict_gtap_to_final
from libraries.lib_results_to_excel import save_to_results_file
from matplotlib.ticker import FormatStrFormatter
import matplotlib as mpl
mpl.rcParams['hatch.linewidth'] = 0.2
import seaborn as sns
div_pal = sns.color_palette('BrBG', n_colors=11)
def plot_expenditures_by_category(pais,hies_FD,hies_FD_tot):
out_dir = 'output/'
if pais == 'brb': out_dir = '/Users/brian/Desktop/Dropbox/IDB/Barbados/output/'
####################
# Plot expenditures by category
# --> as fraction of total expenditures
hies_FD = hies_FD.reset_index().set_index(['cod_hogar','quintile'])
hies_FD_tot = hies_FD_tot.reset_index().set_index(['cod_hogar','quintile'])
final_FD_quints = pd.DataFrame(index=hies_FD_tot.sum(level='quintile').index).sort_index()
# Reset df
do_not_plot = []
plt.figure(figsize=(6,6))
fdict = get_dict_gtap_to_final()
for _h in fdict:
hies_FD_tot[_h] = hies_FD[[fdict[_h][1]]].sum(axis=1)
final_FD_quints[_h] = 100.*(hies_FD_tot[['hhwgt',_h]].prod(axis=1)/hies_FD_tot['totex_hh']).sum(level='quintile')/hies_FD_tot['hhwgt'].sum(level='quintile')
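        # ^ hhwgt-weighted mean of (spending in category _h / total household spending)
        #   within each quintile, expressed in percent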
_ = final_FD_quints.T.copy()
_.columns = ['Q1','Q2','Q3','Q4','Q5']
##########################################################################################
# Record sample (all countries) stats in out_dir+'all_countries/hh_expenditures_table.csv'
try: hhexp = pd.read_csv(out_dir+'all_countries/hh_expenditures_table.csv').set_index('category')
except: hhexp = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for _ex in fdict:
hhexp.loc[fdict[_ex][1],pais.upper()] = _.loc[_ex].mean()
try: hhexp.to_csv(out_dir+'all_countries/hh_expenditures_table.csv')
except: pass
##########################################################################################
##########################################################################################
# Record sample (all countries) stats in out_dir+'all_countries/hh_regressivity_table.csv'
for _q in ['Q1','Q2','Q3','Q4']:
try: hhreg = pd.read_csv(out_dir+'all_countries/hh_regressivity_table_'+_q+'.csv').set_index('category')
except: hhreg = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for _ex in fdict:
hhreg.loc[fdict[_ex][1],pais.upper()] = _.loc[_ex,'Q1']/_.loc[_ex,'Q5']
try: hhreg.to_csv(out_dir+'all_countries/hh_regressivity_table_'+_q+'.csv')
except: pass
##########################################################################################
_ = _[['Q1','Q5']].T.sort_values(by='Q1',axis=1)
null_col = []
for _c in _:
if round(_[_c].mean(),1)==0: null_col.append(_c)
if _[_c].mean()<0.1: do_not_plot.append(_c)
_ = _.drop(null_col,axis=1)
final_FD_quints.to_csv(out_dir+'expenditures/'+pais+'_gasto_by_cat_and_quint.csv')
col_wid=_.shape[1]/2
ax = plt.barh(np.arange(0,_.shape[1],1)*col_wid,_.iloc[0],color=sns.color_palette('BrBG', n_colors=11)[2],height=2.5)
plt.barh(np.arange(0,_.shape[1],1)*col_wid+2.5,_.iloc[1],color=sns.color_palette('BrBG', n_colors=11)[8],height=2.5)
plt.gca().grid(False)
sns.despine(bottom=True)
plt.gca().set_yticks(np.arange(0,_.shape[1],1)*col_wid+1)
plt.gca().set_yticklabels([fdict[_h][1] for _h in _.columns],ha='right',fontsize=10,weight='light',color=greys[7])
plt.gca().set_xticklabels([])
ax = plt.gca()
_y = [0.,0.]
rects = ax.patches
for rect in rects:
if (rect.get_y()+rect.get_height()/2.) > _y[0]:
_y.append(rect.get_y()+rect.get_height()/2.);_y.sort();_y.pop(0)
for rect in rects:
_w = rect.get_width()
pct = ''
if (rect.get_y()+rect.get_height()/2.) in _y: pct = '%'
ax.annotate(str(round(_w,1))+pct,xy=(rect.get_x()+rect.get_width()+0.5, rect.get_y()+rect.get_height()/2.-0.1),
ha='left', va='center',color=greys[7],fontsize=7,zorder=100,clip_on=False,style='italic')
ax.annotate('Wealthiest quintile',xy=(0.8,_y[1]),ha='left',va='center',color=greys[0],fontsize=7,zorder=100,style='italic')
ax.annotate('Poorest quintile',xy=(0.8,_y[0]),ha='left',va='center',color=greys[7],fontsize=7,zorder=100,style='italic')
plt.title('Household expenditures in '+iso_to_name[pais],weight='bold',color=greys[7],fontsize=12,loc='right')
plt.draw()
try:
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gastos_all_categories.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gastos_all_categories.png',format='png',bbox_inches='tight')
except: pass
plt.cla(); plt.close('all')
return hies_FD,hies_FD_tot,null_col
def plot_gtap_exp(pais,do_tax_food=True,verbose=False):
out_dir = 'output/'
if pais == 'brb': out_dir = '/Users/brian/Desktop/Dropbox/IDB/Barbados/output/'
############################
# Kuishuang's code (mostly):
# load household survey data
hh_hhsector = get_hh_survey(pais)
hh_hhsector = hh_hhsector.drop([i for i in hh_hhsector.columns if 'ing' in i or 'ict' in i],axis=1)
#hh_hhsector = hh_hhsector.fillna(1E5)#flag
if verbose: print(hh_hhsector.shape)
# load bridge matrix
xl = pd.ExcelFile('consumption_and_household_surveys/2017-10-13/Bridge_matrix_consumption_items_to_GTAP_power_sectors.xlsx')
if pais in xl.sheet_names: # all sheet names
print('using '+pais+' tab')
bridge_to_use = xl.parse(pais).fillna(0).drop(['Item_english'],axis = 1).set_index('Item') # read the specific sheet
else:
if verbose: print('using default tab')
bridge_to_use = xl.parse('nae_of_default_tab').fillna(0).drop(['Item_english'],axis = 1).set_index('Item')
cols_to_drop = []
for i in bridge_to_use.columns:
if verbose: print(i,bridge_to_use[i].sum())
if bridge_to_use[i].sum(axis=0)==0:
cols_to_drop.append(i)
bridge_to_use = bridge_to_use.drop(cols_to_drop,axis=1)
# household survey in GTAP sectors
hh_gtap_sector = hh_hhsector[bridge_to_use.index].fillna(0).dot(bridge_to_use)
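    # ^ each household's item-level spending vector is multiplied by the item-by-sector
    #   bridge matrix, aggregating survey expenditure items into GTAP sectors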
hh_gtap_sector = hh_gtap_sector.reset_index()
try: hh_gtap_sector['cod_hogar'] = hh_gtap_sector['cod_hogar'].astype('int')
except: hh_gtap_sector['cod_hogar'] = hh_gtap_sector['cod_hogar'].astype('str')
hh_gtap_sector = hh_gtap_sector.reset_index().set_index('cod_hogar')
## Run test.
#print(hh_hhsector.columns)
#print(hh_hhsector.head())
#_hh_hhsector = hh_hhsector.copy()
#for _c in _hh_hhsector.columns:
# if _c != 'gasto_ali':#and _c != 'gasto_alihogar':
# _hh_hhsector[_c] = 0
#_hh_gtap_sector = _hh_hhsector[bridge_to_use.index].fillna(0).dot(bridge_to_use)
if verbose: print(hh_gtap_sector.head(8))
    # calculate each household's share of national consumption, by category
hh_share = (hh_gtap_sector.mul(hh_hhsector.factor_expansion, axis=0).fillna(0))/(hh_gtap_sector.mul(hh_hhsector.factor_expansion, axis=0).fillna(0).sum())
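    # e.g. (illustrative numbers only) a household accounting for $50 of a weighted national
    # $5,000 spent in a category gets hh_share = 0.01 there, and is later allocated 1% of the
    # corresponding GTAP household final-demand column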
# Read household consumption vector from GTAP
_iot_code = pais if pais != 'brb' else 'xcb'
try:
hh_fd_file = 'GTAP_power_IO_tables_with_imports/Household_consumption_both_domestic_import.xlsx'
household_FD = get_FD_scale_fac(pais)*pd.read_excel(hh_fd_file,index_col=[0])[_iot_code].squeeze()
except:
if pais == 'brb': household_FD = get_FD_scale_fac(pais)*pd.read_excel('GTAP_power_IO_tables/xcbIOT.xlsx',sheet_name='Final_Demand',index_col=[0])['Hou'].squeeze()
else: assert(False)
# ^ get_FD_scale_fac(pais) != 1. ONLY IF pais == 'brb'
# Final demand matrix
hh_FD = household_FD*hh_share.fillna(0)
for i in hh_FD.columns: hh_FD[i]/=hh_hhsector['factor_expansion']
if verbose:
print(household_FD.head())
print(hh_FD.head(5))
####################
# Use gtap_to_final script to translate both expenditures & cc into HIES cats
hies_FD, hies_FD_tot, hies_sf = gtap_to_final(hh_hhsector,hh_FD,pais,verbose=True)
# Now, this df should be consistent with the FD vector
if verbose:
print((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum())
print(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum())
print('FD:',round(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum(),3),round((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),3))
assert(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum()/(hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum()>0.999)
assert(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum()/(hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum()<1.001)
####################
####################
if pais == 'brb':
energy_tax_total = get_FD_scale_fac(pais)*pd.read_csv('/Users/brian/Desktop/Dropbox/IDB/Barbados/output/tax_cost_to_hh_in_gtap_cats.csv').set_index('cod_hogar')
final_CC,wgts,_ = gtap_to_final(hh_hhsector,energy_tax_total,pais)
hhwgts = wgts[['pcwgt','hhwgt','hhsize']].copy().dropna()
final_CC_ind = final_CC.copy()
final_CC_CO2 = final_CC.copy()
final_CC_nonCO2 = final_CC.copy()
for col in final_CC_nonCO2.columns: final_CC_nonCO2[col].values[:] = 0
final_CC_dir = final_CC.copy()
for col in final_CC_dir.columns: final_CC_dir[col].values[:] = 0
#print(hhwgts.shape[0],hhwgts.dropna().shape[0])
# HACK: ^ should be no NAs in this df
else:
# Indirect carbon costs - CO2
ccdf_ind_CO2 = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_CO2.csv').set_index('cod_hogar')
# Indirect carbon costs - non-CO2
ccdf_ind_nonCO2 = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_nonCO2.csv').set_index('cod_hogar')
# Indirect carbon costs (allGHG)
ccdf_ind = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_allGHG.csv').set_index('cod_hogar')
# Direct carbon costs (allGHG)
ccdf_dir = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_direct_'+pais+'_allGHG.csv').set_index('cod_hogar')
# ^ these files are per household (multiply by factor_expansion for total)
# HACK
_bypass = pd.DataFrame(index=ccdf_ind.index.copy())
hacker_dict = {'col':['frac_gas'],
'gtm':['frac_gas'],
'pan':['frac_gas'],
'hnd':['frac_gas'],
'nic':['frac_gas','frac_water'],
'pry':['frac_gas','frac_electricity']}
if pais in hacker_dict:
for _set in hacker_dict[pais]:
_gtap_cols = get_dict_gtap_to_final()[_set][0]
_i = [i for i in _gtap_cols if i in ccdf_ind.columns]
_d = [d for d in _gtap_cols if d in ccdf_dir.columns]
_bypass[_set] = ccdf_ind[_i].sum(axis=1) + ccdf_dir[_d].sum(axis=1)
_bypass[_set] *= hh_hhsector['factor_expansion']
try:
ccdf_ind_CO2[_i] = [0,0]
ccdf_ind_nonCO2[_i] = [0,0]
ccdf_ind[_i] = [0,0]
except: ccdf_ind_CO2[_i],ccdf_ind_nonCO2[_i],ccdf_ind[_i] = 0,0,0
try: ccdf_dir[_d] = [0,0]
except: ccdf_dir[_d] = 0
_bypass = _bypass.sum()*1E-6*get_FD_scale_fac(pais)
if not do_tax_food:
ccdf_ind_CO2[['pdr','wht','gro','v_f','osd','c_b','ocr','ctl','oap','rmk','fsh','cmt','omt','vol','mil','pcr','sgr','ofd','b_t']] = 0
ccdf_ind_nonCO2[['pdr','wht','gro','v_f','osd','c_b','ocr','ctl','oap','rmk','fsh','cmt','omt','vol','mil','pcr','sgr','ofd','b_t']] = 0
ccdf_ind[['pdr','wht','gro','v_f','osd','c_b','ocr','ctl','oap','rmk','fsh','cmt','omt','vol','mil','pcr','sgr','ofd','b_t']] = 0
# No food categories in ccdf_dir
final_CC_ind,wgts,_ = gtap_to_final(hh_hhsector,ccdf_ind,pais)
final_CC_dir,wgts,_ = gtap_to_final(hh_hhsector,ccdf_dir,pais)
final_CC = final_CC_ind + final_CC_dir
#final_CC_tot = final_CC_ind_tot + final_CC_dir_tot
final_CC_ind_CO2,wgts,_ = gtap_to_final(hh_hhsector,ccdf_ind_CO2,pais)
final_CC_CO2 = final_CC_ind_CO2 + final_CC_dir
#final_CC_tot_CO2 = final_CC_ind_tot_CO2 + final_CC_dir_tot
final_CC_nonCO2,wgts,_ = gtap_to_final(hh_hhsector,ccdf_ind_nonCO2,pais)
hhwgts = wgts[['pcwgt','hhwgt','hhsize']].copy()
if verbose:
#print('FD:',round(hhwgts[['totex_hh','hhwgt']].prod(axis=1).sum(),1),round((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),3))
print('Direct costs:',round((final_CC_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1),
round((ccdf_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1))
print('Indirect cost:',round((final_CC_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1),
round((ccdf_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1))
assert((final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum()>0.99)
assert((final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum()<1.01)
assert((final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum()>0.99)
assert((final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum()<1.01)
# 5 dataframes with results in them
# --> final_CC
# --> final_CC_CO2 & final_CC_nonCO2
# --> final_CC_ind & final_CC_dir
#hhwgts = wgts[['pcwgt','hhwgt','hhsize']].copy()
# ^ plus this, with necessary weights
#########################
# Assign decile based on totex (household expenditures, mapped to gtap)
hies_FD_tot['pais'] = pais
if 'quintile' not in hies_FD_tot.columns:
_deciles=np.arange(0.10, 1.01, 0.10)
_quintiles=np.arange(0.20, 1.01, 0.20)
hies_FD_tot = hies_FD_tot.groupby('pais',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.totex_pc),reshape_data(x.pcwgt),_deciles),'decile','totex_pc'))
hies_FD_tot = hies_FD_tot.groupby('pais',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.totex_pc),reshape_data(x.pcwgt),_quintiles),'quintile','totex_pc'))
hies_FD_tot = hies_FD_tot.drop(['pais'],axis=1)
hies_FD['decile'] = hies_FD_tot['decile'].copy()
hies_FD['quintile'] = hies_FD_tot['quintile'].copy()
###################################
# Price hikes in all goods due to gasoline increase (% of current price)
fdict = get_dict_gtap_to_final()
try: df = pd.read_csv(out_dir+'all_countries/price_increase_full.csv').set_index('category')
except: df = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for i in fdict:
table_value = None
gtap_cat_array = get_dict_gtap_to_final()[i][0]
#table_value_n = (final_CC_ind_tot['hhwgt']*(final_CC_ind[fdict[i][0]].sum(axis=1)+final_CC_dir[fdict[i][0]].sum(axis=1))/1E6).sum()
# ^ this is already zero when there's no data in the survey
if pais == 'brb':
table_value_n = energy_tax_total[[_g for _g in gtap_cat_array if _g in energy_tax_total.columns]].sum(axis=1).sum()
table_value_d = get_FD_scale_fac(pais)*float(pd.read_excel('GTAP_power_IO_tables/xcbIOT.xlsx',sheet_name='Final_Demand',index_col=[0])['Hou'].squeeze()[gtap_cat_array].sum())
# ^ get_FD_scale_fac(pais) != 1. ONLY IF pais == 'brb'
else:
table_value_n = ((ccdf_ind[[_g for _g in gtap_cat_array if _g in ccdf_ind.columns]].sum(axis=1)
+ccdf_dir[[_g for _g in gtap_cat_array if _g in ccdf_dir.columns]].sum(axis=1))*hh_hhsector['factor_expansion']).sum()/1E6
#table_value_d = get_FD_scale_fac(pais)*float(pd.read_excel('GTAP_power_IO_tables/'
# +_iot_code+'IOT.xlsx','Final_Demand',index_col=[0])['Hou'].squeeze()[gtap_cat_array].sum())
_fname = 'GTAP_power_IO_tables_with_imports/Household_consumption_both_domestic_import.xlsx'
table_value_d = get_FD_scale_fac(pais)*float(pd.read_excel(_fname,index_col=[0])[pais].squeeze()[gtap_cat_array].sum())
# ^ get_FD_scale_fac(pais) != 1. ONLY IF pais == 'brb'. so this should be deleted
if table_value_n == 0 and table_value_d != 0:
print('BYPASS:',pais,_bypass)
try: table_value_n = float(_bypass[i])
except: pass
# throw results...look how clever we are!
if verbose:
print(i,table_value_n,table_value_d)
print('ind:',(ccdf_ind[[_g for _g in gtap_cat_array if _g in ccdf_ind.columns]].sum(axis=1)*hh_hhsector['factor_expansion']).sum()/1E6)
print('dir:',(ccdf_dir[[_g for _g in gtap_cat_array if _g in ccdf_dir.columns]].sum(axis=1)*hh_hhsector['factor_expansion']).sum()/1E6)
table_value = round(100*table_value_n/table_value_d,1)
df.loc[fdict[i][1],pais.upper()] = table_value
if pais == 'brb':
df['BRB']/=1000.
df.loc['Petroleum, gasoline & diesel'] = 6.2
# HACK: don't understand why *=1/1000. would be justified; haven't checked units
# HACK: not sure why 'Petroleum, gasoline & diesel' doesn't come through analysis
_df = df.sort_values(pais.upper(),ascending=False).drop([fdict[i][1] for i in cols_to_drop])[pais.upper()]
_df.name = '[%]'
_df.index.name = 'Relative increase'
_df.round(1).to_latex(out_dir+'latex/pct_change_'+pais.lower()+'.tex')
with open(out_dir+'latex/pct_change_'+pais.lower()+'.tex', 'r') as f:
with open(out_dir+'latex/out_pct_change_'+pais.lower()+'.tex', 'w') as f2:
f2.write(r'\documentclass[10pt]{article}'+'\n')
f2.write(r'\usepackage{amssymb} %maths'+'\n')
f2.write(r'\usepackage{amsmath} %maths'+'\n')
f2.write(r'\usepackage{booktabs}'+'\n')
f2.write(r'\begin{document}'+'\n')
f2.write(f.read())
f2.write(r'\end{document}')
f2.close()
subprocess.call('cd '+out_dir+'latex/; pdflatex out_pct_change_'+pais.lower()+'.tex',shell=True)
for f in glob.glob(out_dir+'latex/*.aux'): os.remove(f)
for f in glob.glob(out_dir+'latex/*.log'): os.remove(f)
for f in glob.glob(out_dir+'latex/out_*.tex'): os.remove(f)
if pais != 'brb': df.to_csv('output/all_countries/price_increase_full.csv')
hies_FD,hies_FD_tot,cols_to_drop = plot_expenditures_by_category(pais,hies_FD,hies_FD_tot)
###################################
# Current spending on all energy (electricity, petroleum, gasoline, diesel, natural gas, & coal), as % of totex
energy_categories = [fdict['frac_fuels'][1],fdict['frac_gas'][1],fdict['frac_char'][1]]
# ^ includes: gasto_tcomb = Household expenditure on transportation fuels
# ^ gasto_vpgk = Household expenditure on petroleum, gasoline and kerosene for domestic use
# ^ gasto_vlp = Household expenditure on liquified petroleum gas for domestic use
    # ^ gasto_vdi = Household expenditure on diesel for domestic use
final_FD_quints = pd.DataFrame(index=hies_FD.reset_index().set_index('quintile').sum(level='quintile').index).sort_index()
final_FD_quints['Direct fuel consumption'] = 100.*((hies_FD_tot['hhwgt']*hies_FD[energy_categories].sum(axis=1)/hies_FD_tot['totex_hh']).sum(level='quintile')
/hies_FD_tot['hhwgt'].sum(level='quintile'))
_hack = final_CC_dir.copy()
_hack['quintile'] = hies_FD_tot.reset_index('quintile')['quintile'].copy()
_hack = _hack.reset_index().set_index(['cod_hogar','quintile'])
final_FD_quints['Direct fuel consumption tax'] = (100./1E6*(_hack.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')
/hies_FD_tot[['totex_pc','pcwgt']].prod(axis=1).sum(level='quintile'))
final_FD_quints.plot(final_FD_quints.index,'Direct fuel consumption',kind='bar',color=quint_colors,legend=False)
plt.gca().set_xticklabels(quint_labels,ha='center',rotation=0)
plt.ylabel('Direct fuel consumption [% of total expenditures]',fontsize=11,weight='bold',labelpad=8)
plt.xlabel('')
plt.ylim([0,final_FD_quints[['Direct fuel consumption','Direct fuel consumption tax']].sum(axis=1).max()*1.05])
rects = plt.gca().patches
for rect in rects:
_w = rect.get_height()
plt.gca().annotate(str(round(_w,1))+'%',xy=(rect.get_x()+rect.get_width()/2, rect.get_y()+rect.get_height()+0.025),
ha='center', va='bottom',color='black',fontsize=8,weight='bold',clip_on=False)
plt.gca().grid(False)
sns.despine()
plt.draw()
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile.png',format='png',bbox_inches='tight')
############################
# Try to plot tax on top of expenditures
#ax = plt.gca()
plt.cla()
final_FD_quints.plot(final_FD_quints.index,'Direct fuel consumption',kind='bar',color=quint_colors,legend=False)
# Below labels the total cost, etc, by quintile
if False:
rects = plt.gca().patches
for rect in rects:
_w = rect.get_height()
plt.gca().annotate(str(round(_w,1))+'%',xy=(rect.get_x()+rect.get_width()-0.025, rect.get_y()+rect.get_height()/2.),
ha='right', va='center',color='black',fontsize=8,weight='bold',clip_on=False)
final_FD_quints.plot(final_FD_quints.index,'Direct fuel consumption tax',kind='bar',color=sns.color_palette('Set1', n_colors=9)[5],legend=False,bottom=final_FD_quints['Direct fuel consumption'],ax=plt.gca())
plt.ylim([0,final_FD_quints[['Direct fuel consumption','Direct fuel consumption tax']].sum(axis=1).max()*1.05])
plt.gca().grid(False)
sns.despine()
plt.gca().set_xticklabels(quint_labels,ha='center',rotation=0)
plt.ylabel('Direct fuel consumption [% of total expenditures]',fontsize=11,weight='bold',labelpad=8)
plt.xlabel('')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile_with_tax.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile_with_tax.png',format='png',bbox_inches='tight')
plt.cla()
###################################
# Put quintile info into final_CC_tot, final_CC_tot_CO2, final_CC_tot_nonCO2
hies_FD_tot = hies_FD_tot.reset_index().set_index('cod_hogar')
try: hies_FD_tot['quintile'] = hies_FD_tot['quintile'].astype('int')
except: hies_FD_tot['quintile'] = hies_FD_tot['quintile'].astype('str')
#
hhwgts['quintile'] = hies_FD_tot['quintile'].copy()
hhwgts = hhwgts.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC['quintile'] = hies_FD_tot['quintile'].copy()
final_CC = final_CC.reset_index().set_index(['cod_hogar','quintile'])
#
try:
final_CC_ind['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_ind = final_CC_ind.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC_dir['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_dir = final_CC_dir.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC_CO2['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_CO2 = final_CC_CO2.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC_nonCO2['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_nonCO2 = final_CC_nonCO2.reset_index().set_index(['cod_hogar','quintile'])
#
except: pass
# ^ this (t/e) pair is for pais != 'brb'
hies_FD_tot = hies_FD_tot.reset_index().set_index(['cod_hogar','quintile'])
##########################################################################################
# Record sample (all countries) stats in hh_tax_cost_table.csv
# total cost
try: hhcost_t = pd.read_csv('output/all_countries/hh_tax_cost_table.csv').set_index('quintile')
except: hhcost_t = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# Direct
try: hhcost_d = pd.read_csv('output/all_countries/hh_direct_tax_cost_table.csv').set_index('quintile')
except: hhcost_d = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# Indirect
try: hhcost_i = pd.read_csv('output/all_countries/hh_indirect_tax_cost_table.csv').set_index('quintile')
except: hhcost_i = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# Direct frac of tax
try: taxfrac_d = pd.read_csv('output/all_countries/hh_direct_tax_frac_table.csv').set_index('quintile')
except: taxfrac_d = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# Indirect frac of tax
try: taxfrac_i = pd.read_csv('output/all_countries/hh_indirect_tax_frac_table.csv').set_index('quintile')
except: taxfrac_i = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# Indirect frac of tax - FOOD, ELEC, and PUBTRANS
try: taxfrac_if = pd.read_csv('output/all_countries/hh_indirect_tax_foodnonCO2_frac_table.csv').set_index('quintile')
except: taxfrac_if = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
try: taxfrac_ie = pd.read_csv('output/all_countries/hh_indirect_tax_elecCO2_frac_table.csv').set_index('quintile')
except: taxfrac_ie = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
try: taxfrac_ipt = pd.read_csv('output/all_countries/hh_indirect_tax_pubtransCO2_frac_table.csv').set_index('quintile')
except: taxfrac_ipt = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
_ = (100./1E6)*(final_CC.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hies_FD_tot[['totex_pc','pcwgt']].prod(axis=1).sum(level='quintile')
for _nq in [1,2,3,4,5]: hhcost_t.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
if pais != 'brb':
_ = (100./1E6)*(final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hies_FD_tot[['totex_pc','pcwgt']].prod(axis=1).sum(level='quintile')
for _nq in [1,2,3,4,5]: hhcost_d.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
_ = (100./1E6)*(final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hies_FD_tot[['totex_pc','pcwgt']].prod(axis=1).sum(level='quintile')
for _nq in [1,2,3,4,5]: hhcost_i.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
#
#
_ = (100.)*(final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/(final_CC.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')
for _nq in [1,2,3,4,5]: taxfrac_d.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
_ = (100.)*(final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/(final_CC.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')
for _nq in [1,2,3,4,5]: taxfrac_i.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
#
_ = (100.)*(final_CC_nonCO2[fdict['frac_food'][1]]*hhwgts['hhwgt']).sum(level='quintile')/(final_CC.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')
for _nq in [1,2,3,4,5]: taxfrac_if.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
_ = (100.)*(final_CC_CO2[fdict['frac_electricity'][1]]*hhwgts['hhwgt']).sum(level='quintile')/(final_CC.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')
for _nq in [1,2,3,4,5]: taxfrac_ie.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
_ = (100.)*(final_CC_CO2[fdict['frac_pubtrans'][1]]*hhwgts['hhwgt']).sum(level='quintile')/(final_CC.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')
for _nq in [1,2,3,4,5]: taxfrac_ipt.loc['Q'+str(_nq),pais.upper()] = _.loc[_nq]
hhcost_t.to_csv(out_dir+'all_countries/hh_tax_cost_table.csv')
hhcost_d.to_csv(out_dir+'all_countries/hh_direct_tax_cost_table.csv')
hhcost_i.to_csv(out_dir+'all_countries/hh_indirect_tax_cost_table.csv')
taxfrac_d.to_csv(out_dir+'all_countries/hh_direct_tax_frac_table.csv')
taxfrac_i.to_csv(out_dir+'all_countries/hh_indirect_tax_frac_table.csv')
taxfrac_if.to_csv(out_dir+'all_countries/hh_indirect_tax_foodnonCO2_frac_table.csv')
taxfrac_ie.to_csv(out_dir+'all_countries/hh_indirect_tax_elecCO2_frac_table.csv')
taxfrac_ipt.to_csv(out_dir+'all_countries/hh_indirect_tax_pubtransCO2_frac_table.csv')
##########################################################################################
###################################
# Cost of indirect carbon price increase (in $)
final_FD_quints = pd.DataFrame(index=hies_FD.reset_index().set_index('quintile').sum(level='quintile').index).sort_index()
final_FD_quints['indirect USD'] = (final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hhwgts['pcwgt'].sum(level='quintile')
final_FD_quints.plot(final_FD_quints.index,'indirect USD',kind='bar',color=quint_colors,legend=False)
plt.gca().set_xticklabels(quint_labels,ha='right')
plt.ylabel('Indirect carbon cost [INT$ per capita]',fontsize=11,labelpad=8)
plt.xlabel('')
plt.title(iso_to_name[pais],fontsize=14,weight='bold')
rects = plt.gca().patches
for rect in rects:
_w = rect.get_width()
plt.gca().annotate('$'+str(int(round(_w,0))),xy=(rect.get_x()+rect.get_width()/2,rect.get_y()+rect.get_height()+0.05),
ha='left', va='center',color='black',fontsize=8,weight='bold',clip_on=False)
plt.gca().grid(False)
sns.despine(left=True)
plt.draw()
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_indirect_tax_total_USD_by_quintile.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_indirect_tax_total_USD_by_quintile.png',format='png',bbox_inches='tight')
# Plot total cost (stacked) in INT$
plt.cla()
final_FD_quints['direct USD'] = (final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hhwgts['pcwgt'].sum(level='quintile')
final_FD_quints.plot(final_FD_quints.index,'direct USD',kind='bar',color=quint_colors,legend=False)
final_FD_quints.plot(final_FD_quints.index,'indirect USD',kind='bar',color=quint_colors,legend=False,alpha=0.5,ax=plt.gca(),bottom=final_FD_quints['direct USD'])
plt.gca().set_xticklabels(quint_labels,ha='right')
plt.ylabel('Total carbon tax burden [INT$ per capita]',fontsize=11,labelpad=8)
plt.xlabel('')
sns.despine(left=True)
plt.gca().grid(False)
plt.draw()
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_tax_total_USD_by_quintile.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_tax_total_USD_by_quintile.png',format='png',bbox_inches='tight')
plt.cla()
###################################
# Cost of indirect carbon price increase (% of totex)
final_FD_quints = pd.DataFrame(index=hies_FD.reset_index().set_index('quintile').sum(level='quintile').index).sort_index()
final_FD_quints['pct of expenditures'] = (100./1E6)*(final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hies_FD_tot[['pcwgt','totex_pc']].prod(axis=1).sum(level='quintile')
final_FD_quints.plot(final_FD_quints.index,'pct of expenditures',kind='bar',color=quint_colors,legend=False)
plt.gca().set_xticklabels(quint_labels,ha='right')
plt.ylabel('Indirect carbon cost relative to expenditures [%]',fontsize=11,weight='bold',labelpad=8)
plt.xlabel('')
plt.title(iso_to_name[pais],fontsize=14,weight='bold')
rects = plt.gca().patches
for rect in rects:
_w = rect.get_width()
plt.gca().annotate(str(round(_w,1))+'%',xy=(rect.get_x()+1.025*rect.get_width(),rect.get_y()+rect.get_height()/2.),
ha='left', va='center',color='black',fontsize=8,weight='bold',clip_on=False)
plt.gca().grid(False)
sns.despine(left=True)
plt.draw()
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_indirect_tax_as_pct_of_gastos_by_quintile.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_indirect_tax_as_pct_of_gastos_by_quintile.png',format='png',bbox_inches='tight')
plt.cla()
###################################
# Cost of direct carbon price increase (in $)
final_FD_quints = pd.DataFrame(index=hies_FD.reset_index().set_index('quintile').sum(level='quintile').index).sort_index()
final_FD_quints['total USD'] = (final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hhwgts['pcwgt'].sum(level='quintile')
final_FD_quints.plot(final_FD_quints.index,'total USD',kind='bar',color=quint_colors,legend=False)
plt.gca().set_xticklabels(quint_labels,ha='right')
plt.ylabel('Carbon tax on fuels [INT$ per capita]',fontsize=11,weight='bold',labelpad=8)
plt.xlabel('')
plt.title(iso_to_name[pais],fontsize=14,weight='bold')
rects = plt.gca().patches
for rect in rects:
_w = rect.get_width()
plt.gca().annotate('$'+str(int(round(_w,0))),xy=(rect.get_x()+1.025*rect.get_width(),rect.get_y()+rect.get_height()/2.),
ha='left', va='center',color='black',fontsize=8,weight='bold',clip_on=False)
plt.gca().grid(False)
sns.despine(left=True)
plt.draw()
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_direct_tax_total_USD_by_quintile.pdf',format='pdf',bbox_inches='tight')
plt.cla()
###################################
# Cost of direct carbon price increase (% of tot_exp)
final_FD_quints = pd.DataFrame(index=hies_FD.reset_index().set_index('quintile').sum(level='quintile').index).sort_index()
final_FD_quints['pct of expenditures'] = 100./1E6*(final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hies_FD_tot[['totex_pc','pcwgt']].prod(axis=1).sum(level='quintile')
final_FD_quints.plot(final_FD_quints.index,'pct of expenditures',kind='bar',color=quint_colors,legend=False)
plt.gca().set_xticklabels(quint_labels,ha='right')
#_x_ticks = plt.gca().get_xticks()
#plt.gca().set_xticklabels([str(round(_x,1)) for _x in _x_ticks[::2]])
plt.ylabel('Carbon tax on direct fuel consumption [% of total expenditures]',fontsize=11,weight='bold',labelpad=8)
plt.xlabel('')
plt.title(iso_to_name[pais],fontsize=14,weight='bold')
rects = plt.gca().patches
for rect in rects:
_w = rect.get_width()
plt.gca().annotate(str(round(_w,1))+'%',xy=(rect.get_x()+rect.get_width()+0.002,rect.get_y()+rect.get_height()/2.),
ha='left', va='center',color='black',fontsize=8,clip_on=False,weight='bold')
plt.gca().grid(False)
sns.despine()
plt.draw()
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_direct_tax_as_pct_of_gastos_by_quintile.pdf',format='pdf',bbox_inches='tight')
plt.cla()
###################################
# Cost of direct & indirect carbon price increase (% of totex)
do_column_annotations = False
plt.figure(figsize=(6,6))
final_FD_quints = pd.DataFrame(index=hies_FD.reset_index().set_index('quintile').sum(level='quintile').index).sort_index()
##########
# All CO2-related costs
final_FD_quints['CO2 expenditures'] = (100./1E6)*(final_CC_CO2.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hies_FD_tot[['pcwgt','totex_pc']].prod(axis=1).sum(level='quintile')
##########
# All nonCO2-related costs
final_FD_quints['nonCO2 expenditures'] = (100./1E6)*(final_CC_nonCO2.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')/hies_FD_tot[['pcwgt','totex_pc']].prod(axis=1).sum(level='quintile')
orig_columns = final_FD_quints.columns
##########
# This grabs the largest category endogenously
find_max_CO2 =
|
pd.DataFrame({'abs':[]})
|
pandas.DataFrame
|
import pandas as pd
import argparse
import numpy as np
from sklearn.linear_model import LinearRegression
parser = argparse.ArgumentParser(
description="Générer un vecteur à partir d'un modèle sauvegardé")
parser.add_argument("input_normal")
parser.add_argument("input_doc2vec")
parser.add_argument("train_file")
parser.add_argument("validation_file")
parser.add_argument("test_file")
parser.add_argument("output_file")
args = parser.parse_args()
input_normal_name = args.input_normal
input_doc2vec_name = args.input_doc2vec
train_file_name = args.train_file
validation_file_name = args.validation_file
test_file_name = args.test_file
output_file_name = args.output_file
df_embeddings = pd.read_csv(input_normal_name, sep=",", index_col=0)
df_hindex = pd.read_csv(input_doc2vec_name, sep=",", index_col=0)
df_train = pd.read_csv(train_file_name, sep=";", index_col=0)
df_validation =
|
pd.read_csv(validation_file_name, sep=";", index_col=0)
|
pandas.read_csv
|
# Copyright(C) 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0(the "License"); you may not
# use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the
# License for the specific language governing permissions and limitations under
# the License.
import operator
import os
import string
import warnings
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
from ibis import literal as L
import ibis.expr.types as ir
from ibis.expr.window import rows_with_max_lookback
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('snowflake')
pytestmark = pytest.mark.snowflake
@pytest.fixture
def guid(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
@pytest.fixture
def guid2(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
@pytest.mark.parametrize(
('left_func', 'right_func'),
[
param(
lambda t: t.double_col.cast('int8'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int8',
),
param(
lambda t: t.double_col.cast('int16'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int16',
),
param(
lambda t: t.string_col.cast('double'), # https://docs.snowflake.com/en/sql-reference/data-types-numeric.html#double-double-precision-real
lambda at: sa.cast(
at.c.string_col, sa.DECIMAL
),
id='string_to_double',
),
param(
lambda t: t.string_col.cast('float'),
lambda at: sa.cast(at.c.string_col, sa.FLOAT),
id='string_to_float',
),
param(
lambda t: t.string_col.cast('decimal'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 0)),
id='string_to_decimal_no_params',
),
param(
lambda t: t.string_col.cast('decimal(9, 3)'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 3)),
id='string_to_decimal_params',
),
],
)
def test_cast(alltypes, at, translate, left_func, right_func):
left = left_func(alltypes)
right = right_func(at)
assert str(translate(left).compile()) == str(right.compile())
def test_date_cast(alltypes, at, translate):
result = alltypes.date_string_col.cast('date')
expected = sa.cast(at.c.date_string_col, sa.DATE)
assert str(translate(result)) == str(expected)
@pytest.mark.parametrize(
'column',
[
'`INDEX`',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, at, translate, column):
col = alltypes[column]
result = col.cast(col.type())
expected = at.c[column]
assert result.equals(col)
assert str(translate(result)) == str(expected)
def test_timestamp_cast_noop(alltypes, at, translate):
result1 = alltypes.timestamp_col.cast('timestamp')
result2 = alltypes.int_col.cast('timestamp')
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
expected1 = at.c.timestamp_col
assert str(translate(result1)) == "CAST({} AS TIMESTAMP)".format(str(expected1))
@pytest.mark.parametrize(
('func', 'expected'),
[
param(operator.methodcaller('year'), 2015, id='year'),
param(operator.methodcaller('month'), 9, id='month'),
param(operator.methodcaller('day'), 1, id='day'),
param(operator.methodcaller('hour'), 14, id='hour'),
param(operator.methodcaller('minute'), 48, id='minute'),
param(operator.methodcaller('second'), 5, id='second'),
param(lambda x: x.day_of_week.index(), 1, id='day_of_week_index'),
param(
lambda x: x.day_of_week.full_name(),
'Tue',
id='day_of_week_full_name',
),
],
)
def test_simple_datetime_operations(con, func, expected, translate):
value= L('2015-09-01 14:48:05.359').cast(dt.string).cast(dt.timestamp)
assert con.execute(func(value)) == expected
@pytest.mark.parametrize(
('func', 'left', 'right', 'expected'),
[
param(operator.add, L(3), L(4), 7, id='add'),
param(operator.sub, L(3), L(4), -1, id='sub'),
param(operator.mul, L(3), L(4), 12, id='mul'),
param(operator.truediv, L(12), L(4), 3, id='truediv_no_remainder'),
param(operator.pow, L(12), L(2), 144, id='pow'),
param(operator.mod, L(12), L(5), 2, id='mod'),
param(operator.truediv, L(7), L(2), 3.5, id='truediv_remainder'),
param(operator.floordiv, L(7), L(2), 3, id='floordiv'),
param(
lambda x, y: x.floordiv(y), L(7), 2, 3, id='floordiv_no_literal'
),
param(
lambda x, y: x.rfloordiv(y), L(2), 7, 3, id='rfloordiv_no_literal'
),
],
)
def test_binary_arithmetic(con, func, left, right, expected):
expr = func(left, right)
result = con.execute(expr)
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
param(L('foo_bar'), 'VARCHAR', id='text'),
param(L(5), 'INTEGER', id='integer'),
param(ibis.NA, None, id='null'),
# TODO(phillipc): should this really be double?
param(L(1.2345), 'DECIMAL', id='numeric'),
param(
L('2015-09-01 14:48:05.359').cast(dt.string).cast(dt.timestamp),
'TIMESTAMP_NTZ',
id='timestamp_without_time_zone',
)
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
assert con.execute(L(value).nullifzero()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
param(operator.methodcaller('left', 3), 'foo', id='left'),
param(operator.methodcaller('right', 3), 'bar', id='right'),
param(operator.methodcaller('substr', 0, 3), 'foo', id='substr_0_3'),
        param(operator.methodcaller('substr', 4, 3), 'bar', id='substr_4_3'),
param(operator.methodcaller('substr', 1), 'oo_bar', id='substr_1'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'expected'),
[('lstrip', 'foo '), ('rstrip', ' foo'), ('strip', 'foo')],
)
def test_string_strip(con, opname, expected):
op = operator.methodcaller(opname)
value = L(' foo ')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'count', 'char', 'expected'),
[('lpad', 6, ' ', ' foo'), ('rpad', 6, ' ', 'foo ')],
)
def test_string_pad(con, opname, count, char, expected):
op = operator.methodcaller(opname, count, char)
value = L('foo')
assert con.execute(op(value)) == expected
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
@pytest.mark.parametrize(
('haystack', 'needle', 'expected'),
[
('foobar', 'bar', True),
('foobar', 'foo', True),
('foobar', 'baz', False),
('100%', '%', True),
('a_b_c', '_', True),
],
)
def test_string_contains(con, haystack, needle, expected):
value = L(haystack)
expr = value.contains(needle)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('value', 'expected'),
[('foo bar foo', 'Foo Bar Foo'), ('foobar Foo', 'Foobar Foo')],
)
def test_capitalize(con, value, expected):
assert con.execute(L(value).capitalize()) == expected
def test_repeat(con):
expr = L('bar ').repeat(3)
assert con.execute(expr) == 'bar bar bar '
def test_re_replace(con):
expr = L('fudge|||chocolate||candy').re_replace('\\|{2,3}', ', ')
assert con.execute(expr) == 'fudge, chocolate, candy'
def test_translate(con):
expr = L('faab').translate('a', 'b')
assert con.execute(expr) == 'fbbb'
@pytest.mark.parametrize(
('raw_value', 'expected'), [('a', 0), ('b', 1), ('d', -1), (None, 3)]
)
def test_find_in_set(demonstration, con, raw_value, expected):
value = L('a', dt.string)
haystack = demonstration.array1
expr = value.find_in_set(haystack)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('raw_value', 'opname', 'expected'),
[
(None, 'isnull', True),
(1, 'isnull', False),
(None, 'notnull', False),
(1, 'notnull', True),
],
)
def test_isnull_notnull(con, raw_value, opname, expected):
lit = L(raw_value)
op = operator.methodcaller(opname)
expr = op(lit)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('foobar').find('bar'), 3, id='find_pos'),
param(L('foobar').find('baz'), -1, id='find_neg'),
param(L('foobar').like('%bar'), True, id='like_left_pattern'),
param(L('foobar').like('foo%'), True, id='like_right_pattern'),
param(L('foobar').like('%baz%'), False, id='like_both_sides_pattern'),
param(L('foobar').like(['%bar']), True, id='like_list_left_side'),
param(L('foobar').like(['foo%']), True, id='like_list_right_side'),
param(L('foobar').like(['%baz%']), False, id='like_list_both_sides'),
param(
L('foobar').like(['%bar', 'foo%']), True, id='like_list_multiple'
),
param(L('foobarfoo').replace('foo', 'H'), 'HbarH', id='replace'),
param(L('a').ascii_str(), ord('a'), id='ascii_str'),
],
)
def test_string_functions(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('San Francisco').re_search('San* [fF].*'), True, id='re_search_match'),
param(L('abcd').re_search(r'[\d]+'), False, id='re_search_no_match'),
param(
L('1222').re_search(r'[\d]+'), True, id='re_search_match_number'
),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(
L('abcd').re_extract('([a-z]+)', 1), 'abcd', id='re_extract_whole'
),
param(
L('How are you doing today?').re_extract('\\b\\S*o\\S*\\b', 3), 'you', id='re_extract_first'
),
# valid group number but no match => NULL for snowflake
param(L('abcd').re_extract(r'(\d)', 1), None, id='re_extract_no_match'),
# match but not a valid group number => NULL
param(L('abcd').re_extract('abcd', 3), None, id='re_extract_match'),
],
)
def test_regexp_extract(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.NA.fillna(5), 5, id='filled'),
param(L(5).fillna(10), 5, id='not_filled'),
param(L(5).nullif(5), None, id='nullif_null'),
param(L(10).nullif(5), 10, id='nullif_not_null'),
],
)
def test_fillna_nullif(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(5, None, 4), 5, id='first'),
param(ibis.coalesce(ibis.NA, 4, ibis.NA), 4, id='second'),
param(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14, id='third'),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(ibis.NA, ibis.NA), None, id='all_null'),
param(
ibis.coalesce(ibis.NA, ibis.NA, ibis.NA.cast('double')),
None,
id='all_nulls_with_one_cast',
),
param(
ibis.coalesce(
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
),
None,
id='all_nulls_with_all_cast',
),
],
)
def test_coalesce_all_na(con, expr, expected):
assert con.execute(expr) == expected
def test_numeric_builtins_work(alltypes, df):
expr = alltypes.double_col.fillna(0)
result = expr.execute()
expected = df.double_col.fillna(0)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('op', 'pandas_op'),
[
param(
lambda t: (t.double_col > 20).ifelse(10, -20),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
),
id='simple',
),
param(
lambda t: (t.double_col > 20).ifelse(10, -20).abs(),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
).abs(),
id='abs',
),
],
)
def test_ifelse(alltypes, df, op, pandas_op):
expr = op(alltypes)
result = expr.execute()
result.name = None
expected = pandas_op(df)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('distinct1', 'distinct2', 'expected1', 'expected2'),
[
(True, True, 'UNION', 'UNION'),
(True, False, 'UNION', 'UNION ALL'),
(False, True, 'UNION ALL', 'UNION'),
(False, False, 'UNION ALL', 'UNION ALL'),
],
)
def test_union_cte(alltypes, distinct1, distinct2, expected1, expected2):
t = alltypes
expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
expr2 = expr1.view()
expr3 = expr1.view()
expr = expr1.union(expr2, distinct=distinct1).union(
expr3, distinct=distinct2
)
result = '\n'.join(
map(
lambda line: line.rstrip(), # strip trailing whitespace
str(
expr.compile().compile(compile_kwargs=dict(literal_binds=True))
).splitlines(),
)
)
expected = """\
WITH anon_1 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_2 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_3 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col)
(SELECT anon_1.string_col, anon_1.metric
FROM anon_1 {} SELECT anon_2.string_col, anon_2.metric
FROM anon_2) {} SELECT anon_3.string_col, anon_3.metric
FROM anon_3""".format(
expected1, expected2
)
assert str(result) == expected
@pytest.mark.parametrize(
('func', 'pandas_func'),
[
param(
lambda t, cond: t.bool_col.count(),
lambda df, cond: df.bool_col.count(),
id='count',
),
param(
lambda t, cond: t.double_col.sum(),
lambda df, cond: df.double_col.sum(),
id='sum',
),
param(
lambda t, cond: t.double_col.mean(),
lambda df, cond: df.double_col.mean(),
id='mean',
),
param(
lambda t, cond: t.double_col.min(),
lambda df, cond: df.double_col.min(),
id='min',
),
param(
lambda t, cond: t.double_col.max(),
lambda df, cond: df.double_col.max(),
id='max',
),
param(
lambda t, cond: t.double_col.var(),
lambda df, cond: df.double_col.var(),
id='var',
),
param(
lambda t, cond: t.double_col.std(),
lambda df, cond: df.double_col.std(),
id='std',
),
param(
lambda t, cond: t.double_col.var(how='sample'),
lambda df, cond: df.double_col.var(ddof=1),
id='samp_var',
),
param(
lambda t, cond: t.double_col.std(how='pop'),
lambda df, cond: df.double_col.std(ddof=0),
id='pop_std',
),
param(
lambda t, cond: t.bool_col.count(where=cond),
lambda df, cond: df.bool_col[cond].count(),
id='count_where',
),
param(
lambda t, cond: t.double_col.mean(where=cond),
lambda df, cond: df.double_col[cond].mean(),
id='mean_where',
),
param(
lambda t, cond: t.double_col.min(where=cond),
lambda df, cond: df.double_col[cond].min(),
id='min_where',
),
param(
lambda t, cond: t.double_col.max(where=cond),
lambda df, cond: df.double_col[cond].max(),
id='max_where',
),
param(
lambda t, cond: t.double_col.var(where=cond),
lambda df, cond: df.double_col[cond].var(),
id='var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond),
lambda df, cond: df.double_col[cond].std(),
id='std_where',
),
param(
lambda t, cond: t.double_col.var(where=cond, how='sample'),
lambda df, cond: df.double_col[cond].var(),
id='samp_var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond, how='pop'),
lambda df, cond: df.double_col[cond].std(ddof=0),
id='pop_std_where',
),
],
)
def test_aggregations(alltypes, df, func, pandas_func):
table = alltypes.limit(100)
df = df.head(table.count().execute())
cond = table.string_col.isin(['1', '7'])
expr = func(table, cond)
result = expr.execute()
expected = pandas_func(df, cond.execute())
np.testing.assert_allclose(result, expected)
def test_not_contains(alltypes, df):
n = 100
table = alltypes.limit(n)
expr = table.string_col.notin(['1', '7'])
result = expr.execute()
expected = ~df.head(n).string_col.isin(['1', '7'])
tm.assert_series_equal(result, expected, check_names=False)
def test_group_concat(alltypes, df):
expr = alltypes.string_col.group_concat()
result = expr.execute()
expected = ','.join(df.string_col.dropna())
assert result == expected
def test_distinct_aggregates(alltypes, df):
expr = alltypes.limit(100).double_col.nunique()
result = expr.execute()
assert result == df.head(100).double_col.nunique()
def test_not_exists(alltypes, df):
t = alltypes
t2 = t.view()
expr = t[~((t.string_col == t2.string_col).any())]
result = expr.execute()
left, right = df, t2.execute()
expected = left[left.string_col != right.string_col]
tm.assert_frame_equal(
result, expected, check_index_type=False, check_dtype=False
)
def test_subquery(alltypes, df):
t = alltypes
expr = (
t.mutate(d=t.double_col.fillna(0))
.limit(1000)
.group_by('string_col')
.size()
)
result = expr.execute().sort_values('string_col').reset_index(drop=True)
expected = (
df.assign(d=df.double_col.fillna(0))
.head(1000)
.groupby('string_col')
.string_col.count()
.reset_index(name='count')
.sort_values('string_col')
.reset_index(drop=True)
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('func', ['mean', 'sum'])
def test_rolling_window(alltypes, func, df):
t = alltypes
df = (
df[['double_col', 'timestamp_col']]
.sort_values('timestamp_col')
.reset_index(drop=True)
)
window = ibis.window(order_by=t.timestamp_col, preceding=6, following=0)
f = getattr(t.double_col, func)
df_f = getattr(df.double_col.rolling(7, min_periods=0), func)
result = (
t.projection([f().over(window).name('double_col')])
.execute()
.double_col
)
expected = df_f()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['min', 'max'])
def test_cumulative_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values('timestamp_col').reset_index(drop=True)
window = ibis.cumulative_window(order_by=t.timestamp_col)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['min', 'max'])
def test_cumulative_partitioned_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values(['string_col', 'timestamp_col']).reset_index(drop=True)
window = ibis.cumulative_window(
order_by=t.timestamp_col, group_by=t.string_col
)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
method = operator.methodcaller('cum{}'.format(func))
expected = df.groupby(df.string_col).double_col.transform(
lambda c: c - method(c)
)
tm.assert_series_equal(result, expected)
def test_null_column(alltypes):
t = alltypes
nrows = t.count().execute()
expr = t.mutate(na_column=ibis.NA).na_column
result = expr.execute()
tm.assert_series_equal(result, pd.Series([None] * nrows, name='na_column'))
def test_null_column_union(alltypes, df):
t = alltypes
s = alltypes[['double_col']].mutate(string_col=ibis.NA.cast('string'))
expr = t[['double_col', 'string_col']].union(s)
result = expr.execute()
nrows = t.count().execute()
expected = pd.concat(
[
df[['double_col', 'string_col']],
pd.concat(
[
df[['double_col']],
pd.DataFrame({'string_col': [None] * nrows}),
],
axis=1,
),
],
axis=0,
ignore_index=True,
)
|
tm.assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, pd.Series(object, index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
# Import modules
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import sklearn.metrics as metrics
# SAX package - source https://github.com/seninp/saxpy
from saxpy.alphabet import cuts_for_asize
from saxpy.sax import ts_to_string
from saxpy.paa import paa
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Plotting modules
import seaborn as sns
from collections import Counter
import matplotlib.pyplot as plt
plt.rcdefaults()
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.offline import init_notebook_mode
init_notebook_mode(connected = True)
import plotly.io as pio
########################################################################################
### Pre-Processing functions ###
########################################################################################
def reduce_mem_usage(df, verbose=True):
""""Function to reduce the memory usage of a dataframe.
Source: https://www.kaggle.com/caesarlupum/ashrae-start-here-a-gentle-introduction"""
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
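# Hedged usage sketch (illustrative only, not part of the original module): the toy
# frame below is an assumption used to show the downcasting done by reduce_mem_usage;
# small non-negative integers end up as int8 and small floats as float16.
if __name__ == "__main__":
    _demo = pd.DataFrame({"meter_reading": np.linspace(0.0, 1.0, 8),
                          "building_id": np.arange(8, dtype="int64")})
    _demo = reduce_mem_usage(_demo, verbose=True)
    print(_demo.dtypes)  # expect float16 for meter_reading and int8 for building_id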
########################################################################################
### Pre-Mining functions ###
########################################################################################
### Data Selection functions
def multicol_2ndColumnSelection(df_multicol, allcol1, col2):
""""Function to select data from a multi-column dataframe based on the 2nd column value.
From a defined 2nd-level column of interest - col2,
the function loops over the dataframe from all the values interest from the 1st-level column - allcol1"""
df = pd.DataFrame()
for i in allcol1:
df[i] = df_multicol[i, col2].copy()
return df
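# Illustrative note (an assumption, not taken from the original code): given a frame
# with two-level columns such as
#     df_multicol = pd.DataFrame(np.ones((3, 4)),
#                                columns=pd.MultiIndex.from_product(
#                                    [["blg1", "blg2"], ["electricity", "water"]]))
# calling multicol_2ndColumnSelection(df_multicol, ["blg1", "blg2"], "water") returns
# a single-level frame with one "water" column per building id.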
def multi2singlecol_1stCol(df_in):
""""Function to transform a 2 column dataframe to a single one, while appending the 2nd column information
to a new attribute."""
# Extract upper level column meter_type information
meter_type_list = []
for meter_type, blg_id in df_in.columns.values:
meter_type_list.append(meter_type)
meter_type_list = list(set(meter_type_list))
dfs = []
for i in meter_type_list:
df1 = pd.melt(df_in[i].reset_index(),
id_vars=df_in.index.name,
var_name="building_id",
value_name=i)
df1.set_index(["building_id", df_in.index.name], inplace=True)
dfs.append(df1) # append to list
meter_df =
|
pd.concat(dfs, axis=1)
|
pandas.concat
|
import pandas as pd
import numpy as np
data_path = "/home/clairegayral/Documents/openclassroom/data/P4/"
res_path = "/home/clairegayral/Documents/openclassroom/res/P4/"
from sklearn import preprocessing
from sklearn.impute import KNNImputer
###################
#### open data ####
###################
product_category_name_translation = pd.read_csv(data_path
+ "product_category_name_translation.csv")
sellers = pd.read_csv(data_path + "olist_sellers_dataset.csv")
products = pd.read_csv(data_path + "olist_products_dataset.csv")
orders = pd.read_csv(data_path + "olist_orders_dataset.csv")
order_reviews = pd.read_csv(data_path + "olist_order_reviews_dataset.csv")
order_payments = pd.read_csv(data_path + "olist_order_payments_dataset.csv")
order_items = pd.read_csv(data_path + "olist_order_items_dataset.csv")
geolocation = pd.read_csv(data_path + "olist_geolocation_dataset.csv")
customers = pd.read_csv(data_path + "olist_customers_dataset.csv")
## Links between the tables:
## order-product
link_order_product = pd.merge(orders["order_id"],
order_items[["order_id","product_id"]],
on = "order_id", how = 'right')
link_order_product
## customer-order
link_customer_order = pd.merge(customers[["customer_unique_id","customer_id"]],
orders[["customer_id","order_id"]],
on = "customer_id", how = 'right')
##########################
#### Construction RFM ####
##########################
##
## Recency
##
tmp = pd.merge(customers[["customer_id","customer_unique_id"]],
orders[["customer_id", "order_id","order_purchase_timestamp"]],
on="customer_id", how="right")
## get the latest order date of each customer
customer_last_timestamp = tmp[["customer_unique_id",
"order_purchase_timestamp"]].groupby("customer_unique_id").max()
## use datetime format
customer_last_timestamp = pd.to_datetime(customer_last_timestamp["order_purchase_timestamp"],
format = "%Y-%m-%d %H:%M:%S")
## subtract the date of the latest order in the data:
t_max = customer_last_timestamp.max()
recency = pd.Series(t_max-customer_last_timestamp, name = "recency")
## get the difference in decimal days format :
recency = recency / np.timedelta64(1, "D")
recency = recency.reset_index()
rfm = recency
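## quick sanity check of the decimal-day conversion used above (illustrative
## assumption, not part of the RFM construction):
_demo_days = (pd.Timestamp("2018-01-03 12:00") - pd.Timestamp("2018-01-01")) / np.timedelta64(1, "D")
assert _demo_days == 2.5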
##
## Frequency
##
frequency = tmp.customer_unique_id.value_counts()
frequency =
|
pd.Series(frequency)
|
pandas.Series
|
import duckdb
from pandas import DataFrame
import pytest
class TestInsertInto(object):
def test_insert_into_schema(self, duckdb_cursor):
# open connection
con = duckdb.connect()
con.execute('CREATE SCHEMA s')
con.execute('CREATE TABLE s.t (id INTEGER PRIMARY KEY)')
# make relation
df =
|
DataFrame([1],columns=['id'])
|
pandas.DataFrame
|
import streamlit as st
import pandas as pd
import altair as alt
import numpy as np
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
@st.cache(allow_output_mutation=True) # add caching so we load the data only once
def load_data():
return pd.read_csv("data/fifa19.csv", encoding="UTF-8", index_col=0)
def write():
df = load_data()
#st.write(df.head())
st.header("Feature Correlation Analysis")
st.write("""Let's explore the relationships between some of the quantitative
variables in the dataset. Select an independent (x-axis) and a dependent
(y-axis) variable below and see a scatter plot of these two variables with
a fitted 5th degree polynomial line, and see their correlation coefficient
as well. You may also select the "use color" checkbox and select a third
variable to be represented by the color of the points on the plot. Hover
your mouse over the points on the plot to see which player that point
represents. Note that noise has been added to the variables in the plot due to
the high number of overlapping points in the dataset, but the correlation
coefficient is calculated using the original data.""")
# Here, we remove the extra text around the wage to get it as an integer
wage_array = df["Value"].to_numpy()
fixed_wages = []
for wage in wage_array:
if wage[-1]=="M":
wage = float(wage[1:-1])*1000000
elif wage[-1]=="K":
wage = float(wage[1:-1])*1000
else:
wage=0
fixed_wages.append(wage)
df["Player_Wage"] = fixed_wages
df = df[df.Player_Wage!='']
df["Player_Wage"] = df["Player_Wage"].astype(np.int64)*1000
correlation_options = ['Age', 'Overall', 'Potential', 'Player_Wage', 'International Reputation',
'Skill Moves', 'Crossing','Finishing', 'HeadingAccuracy', 'ShortPassing',
'Volleys', 'Dribbling', 'Curve', 'FKAccuracy', 'LongPassing',
'BallControl', 'Acceleration', 'SprintSpeed', 'Agility', 'Reactions',
'Balance', 'ShotPower', 'Jumping', 'Stamina', 'Strength', 'LongShots',
'Aggression', 'Interceptions', 'Positioning', 'Vision', 'Penalties',
'Composure', 'Marking', 'StandingTackle', 'SlidingTackle', 'GKDiving',
'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes']
df_quant = df[correlation_options].copy().dropna()
noise = np.random.normal(0,0.3,df_quant.shape)
df_quant_noise = df_quant + noise
df_quant_noise["Name"] = df["Name"].copy()
df_quant_noise["Position"] = df["Position"].copy()
x_var = st.selectbox("Independent Variable", options = correlation_options, index=0)
y_var = st.selectbox("Dependent Variable", options = correlation_options, index=1)
use_color = st.checkbox("Use Color?", value = False)
if use_color:
color_var = st.selectbox("Color Variable", options = correlation_options, index=2)
chart = alt.Chart(df_quant_noise).mark_circle(color="#000000",size=10).encode(
x=alt.X(x_var, scale=alt.Scale(zero=False)),
y=alt.Y(y_var, scale=alt.Scale(zero=False)),
color=alt.Color(color_var),
tooltip = ["Name","Position"]
)
else:
chart = alt.Chart(df_quant_noise).mark_circle(color="#000000",size=10,opacity=.3).encode(
x=alt.X(x_var, scale=alt.Scale(zero=False)),
y=alt.Y(y_var, scale=alt.Scale(zero=False)),
tooltip = ["Name","Position"]
)
correlation = np.corrcoef(df_quant[x_var],df_quant[y_var])[0][1]
st.write("Correlation: %.2f" % correlation)
chart = chart + chart.transform_regression(x_var,y_var,method="poly",order=5).mark_line(color="#0000FF")
chart = chart.properties(
width=800, height=500
).interactive()
st.write(chart)
st.header("Machine Learning Exploration")
st.write("""Now we will examine how well we can predict attributes of a player using this
dataset. Below you can select a target variable and one or many predictor variables,
and a support vector regression model will be built using the input. We split the dataset
into a training set and a testing set, as is common practice in machine learning
(see [here](https://developers.google.com/machine-learning/crash-course/training-and-test-sets/splitting-data)).
You can see the mean-squared-error of the model on the testing portion of the
data, as well as a plot of the residuals.
A residual is the difference between the predicted values and the actual values, and
thus for a perfect classifier all residuals would be 0.""")
target_var = st.selectbox("Target Variable", options = correlation_options, index=1)
features = st.multiselect("Predictor Variables", options = correlation_options)
if features != []:
df_X = df_quant[features]
df_y = df_quant[target_var]
X_train, X_test, y_train, y_test = train_test_split(
df_X, df_y, test_size=0.25)
clf = make_pipeline(StandardScaler(), SVR())
clf.fit(X_train, y_train)
test_preds = clf.predict(X_test)
mse = mean_squared_error(y_test,test_preds)
st.write("Testing MSE = $$\\frac{1}{n}\Sigma_{i=1}^n(y_i-\hat{y}_i)^2$$ = %.2f" % mse)
residuals = y_test - test_preds
ml_df = pd.DataFrame({"residuals":residuals, "y_test":y_test, "predictions":test_preds})
ml_chart = alt.Chart(ml_df).mark_circle(color="#000000",size=10,opacity=.3).encode(
x=alt.X("y_test", scale=alt.Scale(zero=False), title="Actual"),
y=alt.Y("residuals", scale=alt.Scale(zero=False), title="Residuals")
).properties(
width=800, height=500
)
ml_chart += alt.Chart(
|
pd.DataFrame({'y': [0]})
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import itertools
import logging
import os
import shutil
import time
from multiprocessing import Pool, cpu_count
from pathlib import Path
import numpy as np
import pandas as pd
import tqdm
from Bio import SeqIO
from Bio.Blast.Applications import NcbimakeblastdbCommandline
from sklearn.cluster import AgglomerativeClustering, KMeans
import config
from common.align_fasta import FastaAligner
from common.check_input import CheckInput
from common.prepare_fasta import PrepFasta as pf
from common.prime_fasta import PrimeFastaWriter
from common.prime_fasta_processing import FastaFinalizer
from common.quick_union import QuickUnion
from report.report_generator import HtmlReportGenerator
from report.summary_table import ReportTableConstructor
parser = argparse.ArgumentParser(description=(
"REcomp2 - pipeline for comparative analysis of potentially unlimited"
" number of RepeatExplorer results"
),
epilog="Please report about all bugs")
parser.add_argument("-v", "--version",
help="show version",
action="version",
version=f"REcomp {config.PIPELINE_VERSION}")
parser.add_argument("i", help="path(s) to RE results (top level)",
type=str,
metavar="path")
parser.add_argument("p", help="prefix(es) for each paths",
type=str,
metavar="prefix")
parser.add_argument("out", help="path to output directory")
parser.add_argument("-r",
"--references",
help="path to fasta with references repeats",
metavar="REF")
parser.add_argument("-l", help="save logfile in output directory",
action="store_true", dest="log")
parser.add_argument("-c", help="number of CPU to use",
type=int, default=cpu_count(),
dest="cpu_number", metavar="CPU")
parser.add_argument("-io", "--include-other",
help=(
"include `other` contigs and clusters "
"in analysis (default: False)"
),
action="store_true", dest="include_other")
parser.add_argument("-ir", "--include-ribosomal",
action="store_true",
help=(
"include rDNA clusters (rank 4) in analysis "
"(default: False)"
),
dest="include_ribosomal")
parser.add_argument("--evalue",
help=(
"evalue threshold for alignments for supercluster "
"assembly (default: 1e-05)"
),
default=config.EVALUE,
type=float)
parser.add_argument("--low-memory",
help=("use small amount of RAM for 'all to all' "
"blast by using small chunk size (1000) but it "
"can take much time (default chunk size: 10000)"
),
action="store_true",
dest="low_memory")
parser.add_argument("-ss", "-superclusters-search",
help=(
"alignments for union of sequences in supercluster "
"can be performed either blastn or megablast (default)"
": blastn is slower and required more RAM but "
"more sensitive"
),
choices=["blastn", "megablast"],
default="megablast",
dest="task")
args = parser.parse_args()
# catch assertions
assert len(args.p.split()) == len(
set(args.p.split())), ("Prefixes are not unique")
assert len(args.i.split()) == len(
set(args.i.split())), ("Paths are not unique")
assert 0 < args.cpu_number <= cpu_count(), ("CPU count is not valid")
assert args.evalue >= 0.0, ("Wrong E-value threshold")
out_path = Path(args.out)
out_path.mkdir(parents=True, exist_ok=True)
# logging
if args.log:
logging.basicConfig(level=logging.DEBUG,
filename=Path(args.out).joinpath("REcomp.log"),
format=("\n%(asctime)s - %(funcName)s - "
"%(levelname)s -\n%(message)s\n"),
filemode="w")
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"\n%(asctime)s - %(funcName)s - %(levelname)s -\n%(message)s\n")
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
else:
logging.basicConfig(level=logging.INFO,
format=("\n%(asctime)s - %(funcName)s - "
"%(levelname)s -\n%(message)s\n"),)
logging.info(
(
f"------------------------------------------------------------------\n"
f"PIPELINE VERSION : {config.PIPELINE_VERSION}\n"
f" \n"
f"AUTHOR : <NAME> \n"
f"------------------------------------------------------------------\n"
)
)
logging.info(args)
# check input
check_input = CheckInput()
check_input.check_blast(os.environ["PATH"])
if args.references is not None:
check_input.check_references(args.references)
work_dirs = {path: prefix for path, prefix in zip(
args.i.split(), args.p.split())}
check_table = check_input.print_check_table(work_dirs)
logging.info((f"Pipeline will be in progress in 30 seconds\n"
f"Check matching of paths to RE results and their "
f"prefixes\n{check_table}"))
time.sleep(30)
# create folder structure
logging.info("creating directory structure")
fasta_path = Path(args.out).joinpath("fasta")
fasta_path.mkdir(parents=True, exist_ok=True)
prime_fasta = Path(args.out).joinpath("results", "prime_fasta")
prime_fasta.mkdir(parents=True, exist_ok=True)
final_fasta = Path(args.out).joinpath("results", "final_fasta")
final_fasta.mkdir(parents=True, exist_ok=True)
# prepare fasta files with ranks and "others"
logging.info("creating fasta containing all sequences for analysis")
for path, prefix in work_dirs.items():
fasta_prep = pf(path, args.references, prefix)
fasta_prep.create_united_fasta(fasta_path,
include_other=args.include_other,
include_ribosomal=args.include_ribosomal)
if args.references:
with open(Path(fasta_path).joinpath("fasta.fasta"), "a") as fasta:
for record in SeqIO.parse(args.references, "fasta"):
SeqIO.write(record, fasta, "fasta")
# chunk fasta for parallel
records_number = 0
record_iter = SeqIO.parse(
open(Path(fasta_path).joinpath("fasta.fasta")), "fasta")
chunk_size = config.CHUNK_SIZE
if args.low_memory:
chunk_size = config.CHUNK_SIZE // 10  # keep the chunk size an integer for the batch iterator
logging.info(f"chunk size: {int(chunk_size)}")
time.sleep(3)
for i, batch in enumerate(fasta_prep.batch_iterator(record_iter, chunk_size)):
records_number += len(batch)
filename = Path(fasta_path).joinpath(f"fasta{i}.fasta")
with open(filename, "w") as handle:
count = SeqIO.write(batch, handle, "fasta")
logging.info(f"saving chunk {'/'.join(filename.parts[-3:])}")
# prepare connectivity table
cline = NcbimakeblastdbCommandline(
input_file=Path(fasta_path).joinpath("fasta.fasta"),
dbtype="nucl"
)
cline()
logging.info("running all to all blast")
fasta_aligner = FastaAligner(args.evalue,
args.task,
Path(fasta_path).joinpath("fasta.fasta"))
files = [path for path in fasta_path.rglob("*.fasta")
if any(map(str.isdigit, Path(path).stem))]
print(f"Running in {args.cpu_number} cpu(s) in parallel")
time.sleep(3)
pool = Pool(processes=args.cpu_number)
result = tqdm.tqdm(pool.imap_unordered(fasta_aligner.align_fasta, files),
total=len(files))
blast_table = pd.concat(result)
pool.close()
logging.info("all to all blast finished")
blast_table = blast_table[blast_table["qseqid"] != blast_table["sseqid"]]
logging.info("removing of junk alignments")
if args.include_other:
kmeans = KMeans(n_clusters=2).fit(blast_table[["qcovs"]].to_numpy())
else:
kmeans = AgglomerativeClustering(linkage="single").fit(
blast_table[["qcovs"]].to_numpy())
bt_kmeans = np.concatenate((blast_table.to_numpy(),
kmeans.labels_.reshape(-1, 1)), axis=1)
blast_table =
|
pd.DataFrame(data=bt_kmeans[0:, 0:])
|
pandas.DataFrame
|
from . import mol_utils as mu
from . import hyperparameters
import random
import yaml
from .models import load_encoder, load_decoder, load_property_predictor
import numpy as np
import pandas as pd
import os
from .mol_utils import fast_verify
class VAEUtils(object):
def __init__(self,
exp_file='exp.json',
encoder_file=None,
decoder_file=None,
directory=None):
# files
if directory is not None:
curdir = os.getcwd()
os.chdir(os.path.join(curdir, directory))
# exp_file = os.path.join(directory, exp_file)
# load parameters
self.params = hyperparameters.load_params(exp_file, False)
if encoder_file is not None:
self.params["encoder_weights_file"] = encoder_file
if decoder_file is not None:
self.params["decoder_weights_file"] = decoder_file
# char stuff
chars = yaml.safe_load(open(self.params['char_file']))
self.chars = chars
self.params['NCHARS'] = len(chars)
self.char_indices = dict((c, i) for i, c in enumerate(chars))
self.indices_char = dict((i, c) for i, c in enumerate(chars))
# encoder, decoder
self.enc = load_encoder(self.params)
self.dec = load_decoder(self.params)
self.encode, self.decode = self.enc_dec_functions()
self.data = None
if self.params['do_prop_pred']:
self.property_predictor = load_property_predictor(self.params)
# Load data without normalization as dataframe
df = pd.read_csv(self.params['data_file'])
df.iloc[:, 0] = df.iloc[:, 0].str.strip()
df = df[df.iloc[:, 0].str.len() <= self.params['MAX_LEN']]
self.smiles = df.iloc[:, 0].tolist()
if df.shape[1] > 1:
self.data = df.iloc[:, 1:]
self.estimate_estandarization()
if directory is not None:
os.chdir(curdir)
return
def estimate_estandarization(self):
print('Standardization: estimating mu and std values ...', end='')
# sample Z space
smiles = self.random_molecules(size=5000) # this was at 50000
batch = 250 # this was at 2500
Z = np.zeros((len(smiles), self.params['hidden_dim']))
for chunk in self.chunks(list(range(len(smiles))), batch):
sub_smiles = [smiles[i] for i in chunk]
one_hot = self.smiles_to_hot(sub_smiles)
Z[chunk, :] = self.encode(one_hot, False)
self.mu = np.mean(Z, axis=0)
self.std = np.std(Z, axis=0)
self.Z = self.standardize_z(Z)
print('done!')
return
def standardize_z(self, z):
return (z - self.mu) / self.std
def unstandardize_z(self, z):
return (z * self.std) + self.mu
def perturb_z(self, z, noise_norm, constant_norm=False):
if noise_norm > 0.0:
noise_vec = np.random.normal(0, 1, size=z.shape)
noise_vec = noise_vec / np.linalg.norm(noise_vec)
if constant_norm:
return z + (noise_norm * noise_vec)
else:
noise_amp = np.random.uniform(
0, noise_norm, size=(z.shape[0], 1))
return z + (noise_amp * noise_vec)
else:
return z
def smiles_distance_z(self, smiles, z0):
x = self.smiles_to_hot(smiles)
z_rep = self.encode(x)
return np.linalg.norm(z0 - z_rep, axis=1)
def prep_mol_df(self, smiles, z):
df = pd.DataFrame({'smiles': smiles})
sort_df = pd.DataFrame(df[['smiles']].groupby(
by='smiles').size().rename('count').reset_index())
df = df.merge(sort_df, on='smiles')
df.drop_duplicates(subset='smiles', inplace=True)
df = df[df['smiles'].apply(fast_verify)]
if len(df) > 0:
df['mol'] = df['smiles'].apply(mu.smiles_to_mol)
if len(df) > 0:
df = df[
|
pd.notnull(df['mol'])
|
pandas.notnull
|
# -*- coding: utf-8 -*-
import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import datetime
from dateutil import parser
import os
import csv
import matplotlib.pyplot as plt
import pandas as pd
def data_preprocess(dir_path):
dir_list = os.listdir(dir_path)
total_data = []
for dir_csv in dir_list:
total_path = dir_path+'/'+dir_csv+'/prices.csv'
# print(total_path)
file = open(total_path,'r')
rdr = csv.reader(file)
# for d in rdr:
# if 'FAX' in d[0]:
# total_data.append(d)
# break
[total_data.append(d) for d in rdr if 'FAX' in d[0]]
# total_data = list(set(total_data))
# print(total_data)
return total_data
def data_pre_pro_walk(dir_path, key):
total_data = []
for (paths, dirs, files) in os.walk(dir_path):
for fs in files:
if fs == 'prices.csv':
# print(paths,fs)
with open(paths+'/'+fs,'r') as file:
rdr = csv.reader(file)
# [total_data.append(d) for d in rdr if key in d[0]]
for da in [d for d in rdr if key in d[0]]:
da.extend([parser.parse(da[1]).weekday()])
total_data.append(da)
# print(da)
np_sdata = np.array(total_data)
# np_sdata[:,1] holds the date column
# following command applies unique to the date!
# unique is always sorted
uni_np, indic = np.unique(np_sdata[:,1],return_index=True)
# print(np_sdata[indic])
# print(uni_np)
#sdata_sorted = sorted(sdata,key=lambda x: time.mktime(time.strptime(x[1],"%Y-%m-%d")))
return np_sdata[indic]
#data = data_preprocess('2017data')
#sdata = sorted(data, key=lambda x: time.mktime(time.strptime(x[1],"%Y-%m-%d")))
def data_pre_pro_walk_pandas(dir_path, key):
total_data = []
for (paths, dirs, files) in os.walk(dir_path):
for fs in files:
if fs == 'prices.csv':
# print(paths,fs)
with open(paths+'/'+fs,'r') as file:
rdr = csv.reader(file)
# [total_data.append(d) for d in rdr if key in d[0]]
for da in [d for d in rdr if key in d[0]]:
da.extend([parser.parse(da[1]).weekday()])
total_data.append(da)
# print(da)
np_sdata = np.array(total_data)
# np_sdata[:,1] holds the date column
# following command applies unique to the date!
# unique is always sorted
uni_np, indic = np.unique(np_sdata[:,1],return_index=True)
udata = np_sdata[indic]
dates = pd.DatetimeIndex(udata[:,1])
uni_data = np.delete(udata, 1,1)
uni_data = np.delete(uni_data, 0,1)
uni_data = np.float64(uni_data)
labels = ['open','high','low','close','volume','adj_close','week']
df = pd.DataFrame(uni_data, index=dates,columns=labels)
return df
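# Usage sketch (hedged - the directory name is an assumption mirroring the commented
# call above): build a date-indexed frame for one ticker and plot its closing price,
#     df_fax = data_pre_pro_walk_pandas('2017data', 'FAX')
#     df_fax['close'].plot()
# The returned frame is indexed by a DatetimeIndex and has the columns
# ['open','high','low','close','volume','adj_close','week'].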
def data_pre_pro_walk_pandas_multikey(dir_path, key_list):
total_data = pd.DataFrame()
for (paths, dirs, files) in os.walk(dir_path):
for fs in files:
if fs == 'prices.csv':
with open(paths+'/'+fs,'r') as file:
try:
df = pd.read_csv(file)
for key in key_list:
aa = df[df.symbol==key]
total_data=total_data.append(aa,ignore_index=True)
except:
pass
df = total_data.set_index('date').sort_index().drop_duplicates(keep='last')
return df
def data_pre_pro_walk_pandas_multikey_ReturnArray(dir_path, key_list):
total_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # dtypes is never a str instance
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():  # iterating the frame directly yields only column labels
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean using a multiplication (weighted-average) method, since a direct division can produce an infinity or NaN.
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
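# Worked note on the weighted-average trick above (illustrative, an assumption added
# for clarity): numpy.average(x, weights=ones_like(x) / x.size) computes
# sum(x * (1/n)) / sum(1/n) = mean(x) without an explicit division by n; for
# x = [2, 4, 6] it returns ((2 + 4 + 6) / 3) / (3 * (1 / 3)) = 4.0.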
def _calculateStd(self, data):
"""
Calculates the standard deviation using a multiplication method, since a direct division can produce an infinity or NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):  # '== numpy.nan' never matches NaN
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)  # filter the input frame; dataAnalysisCleaned is still None here
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51":
|
pandas.StringDtype()
|
pandas.StringDtype
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 12:16:43 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.preprocessing import StandardScaler
#%%
train_full = pd.read_csv("D:\Kaggle\Santander_classification\Data\\train.csv")
train = train_full.sample(n = 20000).reset_index()
#%%
features = train.drop(columns = ["index", "ID_code", "target"])
#%%
def preprocessing(dataframe):
standardized = StandardScaler().fit_transform(dataframe)
PrincipalComponent = PCA(n_components=199)
PrincipleComp = PrincipalComponent.fit_transform(standardized)
variance = PrincipalComponent.explained_variance_ratio_
variance_ratio = np.cumsum(np.round(variance, decimals=10)*100)
print(variance_ratio)
return PrincipleComp
PrincipleComp = preprocessing(features)
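# Editorial sketch (not part of the original script): the cumulative variance printed
# above could be used to pick n_components automatically instead of hard-coding 199,
# e.g. the smallest number of components explaining 95% of the variance:
#   pca = PCA().fit(StandardScaler().fit_transform(features))
#   n_95 = int(np.argmax(np.cumsum(pca.explained_variance_ratio_) >= 0.95)) + 1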
#%%
output = train["target"]
#%%
x_train, x_test, y_train, y_test = train_test_split(PrincipleComp, output)
#%%
import keras
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
import tensorflow as tf
#%%
def auc(y_true, y_pred):
auc = tf.metrics.auc(y_true, y_pred)[1]
K.get_session().run(tf.local_variables_initializer())
return auc
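# Editorial note (assumption, not in the original): this manual streaming-AUC metric is a
# TF1-era workaround; with TensorFlow 2 / tf.keras one could instead pass
# tf.keras.metrics.AUC() directly in model.compile(metrics=[...]).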
#%%
model = Sequential([
        Dense(256, input_dim = 199, kernel_initializer='normal', activation='relu'),  # input_dim matches the 199 PCA components produced above
Dropout(0.6),
BatchNormalization(),
Dense(64, kernel_initializer='normal', activation='relu'),
Dropout(0.5),
BatchNormalization(),
Dense(16, kernel_initializer='normal', activation='relu'),
Dropout(0.4),
BatchNormalization(),
Dense(4, kernel_initializer='normal', activation='tanh'),
Dropout(0.3),
BatchNormalization(),
Dense(1, kernel_initializer='normal', activation='sigmoid')
])
#%%
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', auc])
model.summary()
#%%
model.fit(x_train, y_train, batch_size=500, epochs = 10, validation_data=(x_test, y_test))
#%%
predictions = model.predict(x_test)
auc = roc_auc_score(y_test, predictions)  # AUC is computed on the raw probabilities
predictions = (predictions > 0.5) * 1  # threshold to hard labels so f1_score gets binary input
score = f1_score(y_test, predictions)
#%%
test_full = pd.read_csv("D:\Kaggle\Santander_classification\Data\\test.csv")
test = test_full.drop(columns = ["ID_code"])
test_features = preprocessing(test)
target = model.predict(test_features)
target = (target > 0.5) * 1
#%%
submission =
|
pd.DataFrame()
|
pandas.DataFrame
|
import string
import pandas as pd
import numpy as np
import doctest
from texthero import preprocessing, stopwords
from . import PandasTestCase
"""
Test doctest
"""
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(preprocessing))
return tests
class TestPreprocessing(PandasTestCase):
"""
Test remove digits.
"""
def test_remove_digits_only_block(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits h1n1")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_any(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits h n ")
self.assertEqual(preprocessing.remove_digits(s, only_blocks=False), s_true)
def test_remove_digits_brackets(self):
s = pd.Series("Digits in bracket (123 $) needs to be cleaned out")
s_true = pd.Series("Digits in bracket ( $) needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_start(self):
s = pd.Series("123 starting digits needs to be cleaned out")
s_true = pd.Series(" starting digits needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_end(self):
s = pd.Series("end digits needs to be cleaned out 123")
s_true = pd.Series("end digits needs to be cleaned out ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_phone(self):
s = pd.Series("+41 1234 5678")
s_true = pd.Series("+ ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_punctuation(self):
s = pd.Series(string.punctuation)
s_true = pd.Series(string.punctuation)
self.assertEqual(preprocessing.remove_digits(s), s_true)
"""
Test replace digits
"""
def test_replace_digits(self):
s = pd.Series("1234 falcon9")
s_true = pd.Series("X falcon9")
self.assertEqual(preprocessing.replace_digits(s, "X"), s_true)
def test_replace_digits_any(self):
s = pd.Series("1234 falcon9")
s_true = pd.Series("X falconX")
self.assertEqual(
preprocessing.replace_digits(s, "X", only_blocks=False), s_true
)
"""
Remove punctuation.
"""
    def test_remove_punctuation(self):
s = pd.Series("Remove all! punctuation!! ()")
s_true = pd.Series(
"Remove all punctuation "
) # TODO maybe just remove space?
self.assertEqual(preprocessing.remove_punctuation(s), s_true)
"""
Remove diacritics.
"""
    def test_remove_diacritics(self):
s = pd.Series("Montréal, über, 12.89, Mère, Françoise, noël, 889, اِس, اُس")
s_true = pd.Series("Montreal, uber, 12.89, Mere, Francoise, noel, 889, اس, اس")
self.assertEqual(preprocessing.remove_diacritics(s), s_true)
"""
Remove whitespace.
"""
def test_remove_whitespace(self):
s = pd.Series("hello world hello world ")
s_true = pd.Series("hello world hello world")
self.assertEqual(preprocessing.remove_whitespace(s), s_true)
"""
Test pipeline.
"""
def test_pipeline_stopwords(self):
s = pd.Series("E-I-E-I-O\nAnd on")
s_true = pd.Series("e-i-e-i-o\n ")
pipeline = [preprocessing.lowercase, preprocessing.remove_stopwords]
self.assertEqual(preprocessing.clean(s, pipeline=pipeline), s_true)
"""
Test stopwords.
"""
def test_remove_stopwords(self):
text = "i am quite intrigued"
text_default_preprocessed = " quite intrigued"
text_spacy_preprocessed = " intrigued"
text_custom_preprocessed = "i quite "
self.assertEqual(
preprocessing.remove_stopwords(pd.Series(text)),
pd.Series(text_default_preprocessed),
)
self.assertEqual(
preprocessing.remove_stopwords(
pd.Series(text), stopwords=stopwords.SPACY_EN
),
pd.Series(text_spacy_preprocessed),
)
self.assertEqual(
preprocessing.remove_stopwords(
pd.Series(text), stopwords={"am", "intrigued"}
),
pd.Series(text_custom_preprocessed),
)
def test_stopwords_are_set(self):
self.assertEqual(type(stopwords.DEFAULT), set)
self.assertEqual(type(stopwords.NLTK_EN), set)
self.assertEqual(type(stopwords.SPACY_EN), set)
"""
Test remove html tags
"""
def test_remove_html_tags(self):
s = pd.Series("<html>remove <br>html</br> tags<html> ")
s_true = pd.Series("remove html tags ")
self.assertEqual(preprocessing.remove_html_tags(s), s_true)
"""
Text tokenization
"""
def test_tokenize(self):
s = pd.Series("text to tokenize")
s_true = pd.Series([["text", "to", "tokenize"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
def test_tokenize_multirows(self):
s = pd.Series(["first row", "second row"])
s_true =
|
pd.Series([["first", "row"], ["second", "row"]])
|
pandas.Series
|
"""
Comparison between solving with only one initial estimate and taking the best of
6 initial estimates (corresponding to the 6 axis-aligned unit vectors) for u.
"""
from calibration.util import *
from calibration.solver import bf_slsqp, slsqp
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# change working directory to the directory this file is in (for saving data)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
SENSOR_NOISES = [0, 20, 40]
SAMPLES = 100
GEN_DATA = False
if(GEN_DATA):
results = {
"solver_type": [],
"noise": [],
"p_error": [],
"u_error": [],
"a_error": [],
"d_error": []
}
for noise in SENSOR_NOISES:
i = 0
while i < SAMPLES:
p = [np.random.uniform(-100, 100) for _ in range(3)]
u = random_unit_vector()
a = random_unit_vector()
d = np.random.uniform(-200, 200)
obs = gen_observation(p, u, a, d)
if(obs != float('inf') and angle_between(u, a) < 1):
print(i)
x_0 = obs[1]
tfs = generate_motions(p, u, a, d, x_0, [[-1000, 1000]]*3, radius=2000, n=32)
tfd_ps = [from_hom(tf @ to_hom(p)) for tf in tfs]
tfd_us = [from_hom(tf @ np.append(u, [0])) for tf in tfs]
measurements = [gen_observation(tfd_p, tfd_u, a, d)[0] for tfd_p, tfd_u in zip(tfd_ps, tfd_us)]
measurements = [m + np.random.normal(0, noise) for m in measurements]
soln, loss = slsqp(
tfs,
measurements,
a_est=[0, 0, 1],
d_est=0,
p_est=[0, 0, 0],
u_est=[0, 0, -1],
p_bounds=[
[-100, 100],
[-100, 100],
[-100, 100]
],
d_bounds = [-200, 200]
)
results["solver_type"].append("One")
results["noise"].append(noise)
results["p_error"].append(np.linalg.norm(np.array(p) - np.array(soln[0]))) #TODO check
results["u_error"].append(angle_between(u, soln[1]))
results["a_error"].append(angle_between(a, soln[2]))
results["d_error"].append(np.abs(d - soln[3]))
soln, loss = bf_slsqp(
tfs,
measurements,
p_bounds=[
[-100, 100],
[-100, 100],
[-100, 100]
],
d_bounds = [-200, 200]
)
results["solver_type"].append("Best of 6")
results["noise"].append(noise)
results["p_error"].append(np.linalg.norm(np.array(p) - np.array(soln[0]))) #TODO check
results["u_error"].append(angle_between(u, soln[1]))
results["a_error"].append(angle_between(a, soln[2]))
results["d_error"].append(np.abs(d - soln[3]))
i+=1
results = pd.DataFrame(results)
results.to_csv('data/simulated/initial_est_test.csv')
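    # Editorial sketch (not in the original script) of how the saved results might be
    # visualised with the seaborn import above; column names come from the dict above:
    #   sns.boxplot(data=results, x="noise", y="p_error", hue="solver_type")
    #   plt.show()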
else:
results =
|
pd.read_csv("data/simulated/initial_est_test.csv")
|
pandas.read_csv
|
import math
from abc import ABC
from typing import Optional, Iterable
import pandas as pd
from django.db import connection
from pandas import DataFrame
from recipe_db.analytics import METRIC_PRECISION, POPULARITY_START_MONTH, POPULARITY_CUT_OFF_DATE
from recipe_db.analytics.scope import RecipeScope, StyleProjection, YeastProjection, HopProjection, \
FermentableProjection
from recipe_db.analytics.utils import remove_outliers, get_style_names_dict, get_hop_names_dict, get_yeast_names_dict, \
get_fermentable_names_dict, RollingAverage, Trending, months_ago
from recipe_db.models import Recipe
class RecipeLevelAnalysis(ABC):
def __init__(self, scope: RecipeScope) -> None:
self.scope = scope
class RecipesListAnalysis(RecipeLevelAnalysis):
def random(self, num_recipes: int) -> Iterable[Recipe]:
scope_filter = self.scope.get_filter()
query = '''
SELECT r.uid AS recipe_id
FROM recipe_db_recipe AS r
WHERE r.name IS NOT NULL {}
ORDER BY random()
LIMIT %s
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters + [num_recipes])
recipe_ids = df['recipe_id'].values.tolist()
if len(recipe_ids) == 0:
return []
return Recipe.objects.filter(uid__in=recipe_ids).order_by('name')
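# Editorial usage sketch (assumed API, not from the original module):
#   scope = RecipeScope()  # assuming an unfiltered scope can be constructed like this
#   sample = RecipesListAnalysis(scope).random(num_recipes=5)
#   per_style_counts = RecipesCountAnalysis(scope).per_style()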
class RecipesCountAnalysis(RecipeLevelAnalysis):
def total(self) -> int:
scope_filter = self.scope.get_filter()
query = '''
SELECT
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
if len(df) == 0:
return 0
return df['total_recipes'].values.tolist()[0]
def per_day(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created) AS day,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created)
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('day')
return df
def per_month(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created, 'start of month')
ORDER BY month ASC
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('month')
return df
def per_style(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
ras.style_id,
count(DISTINCT r.uid) AS total_recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles ras
ON r.uid = ras.recipe_id
WHERE
1 {}
GROUP BY ras.style_id
ORDER BY ras.style_id ASC
'''.format(scope_filter.where)
df =
|
pd.read_sql(query, connection, params=scope_filter.parameters)
|
pandas.read_sql
|
import pandas as pd
import os
import sys
# Header
name = "Topsis-Harmanjit-101903287"
__version__ = "0.0.1"
__author__ = '<NAME>'
__credits__ = 'Thapar Institute of Engineering and Technology'
# Code
def main():
# Checking for command line arguments
if len(sys.argv) != 5:
print("ERROR : NUMBER OF PARAMETERS")
print("USAGE EXAMPLE : python 101903287.py <input_file.csv> 1,1,1,1 +,+,-,+ <result_file.csv> ")
exit(1)
# Checking for input file in directory
elif not os.path.isfile(sys.argv[1]):
print(f"ERROR : {sys.argv[1]} Don't exist!!")
exit(1)
# Checking for input file formats
elif ".csv" != (os.path.splitext(sys.argv[1]))[1]:
print(f"ERROR : {sys.argv[1]} is not csv!")
exit(1)
# Checking for output file formats
elif (".csv" != (os.path.splitext(sys.argv[4]))[1]):
print("ERROR : Output file extension is wrong")
exit(1)
# Function Code
else:
df = pd.read_csv(sys.argv[1])
col = len(df.columns.values)
# Checking for columns
if col < 3:
print("ERROR : Input file have less than 3 columns")
exit(1)
# Handling errors of weighted and impact arrays
try:
weights = [int(i) for i in sys.argv[2].split(',')]
except:
print("ERROR : In weights array please check again")
exit(1)
impact = sys.argv[3].split(',')
for i in impact:
if not (i == '+' or i == '-'):
print("ERROR : In impact array please check again")
exit(1)
# Checking number of column,weights and impacts is same or not
if col != len(weights)+1 or col != len(impact)+1:
print(
"ERROR : Number of weights, number of impacts and number of columns not same")
exit(1)
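        # Editorial sketch (not the original loop body) of the coerce-then-mean-fill
        # pattern the comment below describes:
        #   df.iloc[:, i] = pd.to_numeric(df.iloc[:, i], errors='coerce')
        #   df.iloc[:, i] = df.iloc[:, i].fillna(df.iloc[:, i].mean())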
        # Handling non-numeric data and filling non-numeric values with the column mean
for i in range(1, col):
|
pd.to_numeric(df.iloc[:, i], errors='coerce')
|
pandas.to_numeric
|
'''
prep.py : reads and prepares raster files for time series feature extraction
authors: m.mann & a.bedada
'''
import numpy as np
import glob
import os.path
import pandas as pd
import geopandas as gpd
import rasterio
from rasterio import features
import gdal
from re import sub
from pathlib import Path
def set_df_mindex(df):
'''
Returns dataframe with pixel_id and time index
'''
df.set_index(['pixel_id', 'time'], inplace=True)
return df
def set_df_index(df):
df.set_index(['pixel_id'], inplace=True)
return df
def reset_df_index(df):
df.reset_index(inplace=True)
return df
def set_common_index(a, b):
a = reset_df_index(if_series_to_df(a))
b = reset_df_index(if_series_to_df(b))
index_value = a.columns.intersection(b.columns) \
.intersection(['pixel_id','time']).tolist()
a.set_index(index_value, inplace=True)
b.set_index(index_value, inplace=True)
return a, b
def read_my_df(path):
my_df = pd.read_csv(os.path.join(path,'my_df.csv'))
my_df = set_df_mindex(my_df) #sort
# add columns needed for tsfresh
my_df = reset_df_index(my_df)
return(my_df)
def path_to_var(path):
'''
Returns variable name from path to folder of tifs
'''
return([sub(r'[^a-zA-Z ]+', '', os.path.basename(x).split('.')[0]) for x in
glob.glob("{}/**/*.tif".format(path), recursive=True) ][0])
def image_names(path):
'''
Reads raster files from multiple folders and returns their names
:param path: directory path
:return: names of the raster files
'''
images = glob.glob("{}/**/*.tif".format(path), recursive=True)
image_name = [os.path.basename(tif).split('.')[0]
for tif in images]
# handle single tif case
if len(image_name) == 0:
image_name = [os.path.basename(path).split('.')[0]]
return image_name
def read_images(path):
'''
Reads a set of associated raster bands from a file.
Can read one or multiple files stored in different folders.
:param path: file name or directory path
:return: raster files opened as GDALDataset
'''
if os.path.isdir(path):
images = glob.glob("{}/**/*.tif".format(path), recursive=True)
raster_files = [gdal.Open(f, gdal.GA_ReadOnly) for f in images]
else:
raster_files = [gdal.Open(path, gdal.GA_ReadOnly)]
return raster_files
def image_to_array(path):
'''
Converts images inside multiple folders to stacked array
:param path: directory path
:return: stacked numpy array
'''
raster_array = np.stack([raster.ReadAsArray()
for raster in read_images(path)],
axis=-1)
return raster_array
def image_to_series(path):
'''
Converts images to one dimensional array with axis labels
:param path: directory path
:return: pandas series
'''
data = image_to_array(path)
rows, cols, num = data.shape
data = data.reshape(rows*cols, num)
# create index
index = pd.RangeIndex(start=0, stop=len(data), step=1, name = 'pixel_id')
# create wide df with images as columns
df = pd.DataFrame(data=data[0:,0:],
index=index,
dtype=np.float32,
columns=image_names(path))
#reindex and sort columns
df2 = df.reindex(sorted(df.columns), axis=1)
# stack columns as 1d array
df2 = df2.stack().reset_index()
# create a time series column
df2['time'] = df2['level_1'].str.split('[- _]').str[1]
df2['kind'] = df2['level_1'].str.split('[- _]').str[0]
# set multiindex
df2.set_index(['pixel_id', 'time'], inplace=True)
#rename all columns
df2.columns =[ 'level_1', 'value', 'kind']
df2.drop(['level_1'], axis=1, inplace = True)
# add columns needed for tsfresh
df2.reset_index(inplace=True, level=['pixel_id','time'])
# df2['pixel_id'] = df2.index.get_level_values('pixel_id')
# df2['time'] = df2.index.get_level_values('time')
return df2
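# Editorial sketch of the expected long-format output (folder name is an assumption):
#   df_long = image_to_series('path/to/ndvi_tifs')
#   # columns: pixel_id, time, value, kind -- one row per pixel per input image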
def image_to_series_simple(file,dtype = np.int8):
'''
Reads and prepares single raster file
:param file: raster file name
:param dtype: numpy data type to return (default:np.int8)
:return: One-dimensional ndarray with axis
'''
# read image as array and reshape its dimension
rows, cols, num = image_to_array(file).shape
data = image_to_array(file).reshape(rows * cols)
# create an index for each pixel
index = pd.RangeIndex(start=0, stop=len(data), step=1, name = 'pixel_id')
# convert N-dimension array to one dimension array
df = pd.Series(data = data,
index = index,
dtype = dtype,
name = 'value')
return df
def poly_rasterizer(poly,raster_ex, raster_path_prefix, buffer_poly_cells=0):
'''
Rasterizes polygons by assigning a value 1.
It can also add a buffer at a distance that is multiples of the example raster resolution
    :param poly: polygon to convert to raster
:param raster_ex: example tiff
:param raster_path_prefix: directory path to the output file example: 'F:/Boundary/StatePoly_buf'
:param buffer_poly_cells: buffer size in cell count example: 1 = buffer by one cell
:return: a GeoTiff raster
'''
# check if polygon is already geopandas dataframe if so, don't read again
if ('poly' in locals()):
if not(isinstance(poly, gpd.geodataframe.GeoDataFrame)):
poly = gpd.read_file(poly)
else:
poly = poly
# create column of ones to rasterize for presence (1)
poly['ONES'] = 1
# get example metadata
with rasterio.open(raster_ex) as src:
array = src.read()
profile = src.profile
profile.update(dtype=rasterio.float32, count=1, compress='lzw',nodata=0)
out_arr = src.read(1) # get data from first band, this gets updated in write
out_arr.fill(0) #set all values of raster to zero
# reproject polygon to match crs of raster
poly = poly.to_crs(src.crs)
# buffer polygon to avoid edge effects
if buffer_poly_cells != 0:
poly['geometry'] = poly.buffer(buffer_poly_cells*src.res[0] ) # this creates an empty polygon geoseries
# Write to tif, using the same profile as the source
with rasterio.open(raster_path_prefix+'.tif', 'w', **profile) as dst:
# generator of geom, value pairs to use in rasterizing
shapes = ((geom,value) for geom, value in zip(poly.geometry, poly.ONES))
#rasterize shapes
burned_value = features.rasterize(shapes=shapes, fill=0, out=out_arr, transform=dst.transform)
dst.write(burned_value,1)
def poly_rasterizer_year_group(poly,raster_exmpl,raster_path_prefix,
year_col_name='YEAR_',year_sub_list=range(1980,1990)):
'''
    Rasterizes polygons by assigning a value 1 to each pixel. Utilizes the year column to create
    an aggregated polygon across multiple year groups.
    :param poly: polygon to convert to raster
:param raster_ex: example tiff to base output on
:param raster_path_prefix: directory path to the output file example: 'F:/Boundary/StatePoly_buf'
:param year_col_name: column storing year to compare year_sub_list to
:param year_sub_list: an int year, range(), or list of start end dates [1951, 1955]
:return: a GeoTiff raster
'''
# year or year groups must be forced into a list or range
if type(year_sub_list)==int:
year_sub_list = [year_sub_list]
elif type(year_sub_list) == range:
year_sub_list = year_sub_list
elif type(year_sub_list) == list:
# convert to range so all years are rasterized
year_sub_list = range(year_sub_list[0],year_sub_list[1]+1)
# check if polygon is already geopandas dataframe if so, don't read again
if not('polys' in locals()):
polys = gpd.read_file(poly)
if ('polys' in locals()):
if not(isinstance(polys, gpd.geodataframe.GeoDataFrame)):
polys = gpd.read_file(poly)
else:
polys = poly
# subset to year and convert to integer
polys = polys[polys.loc[:,year_col_name].isin( [str(i) for i in year_sub_list] )]
# create column of ones to rasterize for presence (1) of fire
polys['ONES'] = 1
# get example metadata
with rasterio.open(raster_exmpl) as src:
array = src.read()
profile = src.profile
profile.update(dtype=rasterio.float32, count=1, compress='lzw',nodata=0)
out_arr = src.read(1) # get data from first band, this gets updated in write
# Write to tif, using the same profile as the source
with rasterio.open(raster_path_prefix+str(year_sub_list[0])+'_'+str(year_sub_list[-1])+'.tif', 'w', **profile) as dst:
# generator of geom, value pairs to use in rasterizing
shapes = ((geom,value) for geom, value in zip(polys.geometry, polys.ONES))
#rasterize shapes
rasterized_value = features.rasterize(shapes=shapes, fill=0, out=out_arr, transform=dst.transform)
dst.write(rasterized_value,1)
def poly_to_series(poly,raster_ex, field_name, nodata=-9999, plot_output=True):
'''
    Rasterizes polygons by burning the values of a chosen attribute column into
    the grid of an example raster and returns the result as a single-column dataframe
    :param poly: polygon to convert to raster
    :param raster_ex: example tiff
    :param field_name: name of the polygon attribute column whose values are burned into the raster
    :param nodata: (int or float, optional) – Used as fill value for all areas not covered by input geometries.
    :param plot_output: (True/False, optional) – Plot rasterized polygon data?
:return: a pandas dataframe with a named column of rasterized data
'''
# check if polygon is already geopandas dataframe if so, don't read again
if ('poly' in locals()):
if not(isinstance(poly, gpd.geodataframe.GeoDataFrame)):
poly = gpd.read_file(poly)
else:
poly = poly
# get example metadata
with rasterio.open(raster_ex) as src:
array = src.read()
profile = src.profile
profile.update(dtype=rasterio.float32, count=1, compress='lzw',nodata=nodata)
out_arr = src.read(1) # get data from first band, this gets updated in write
out_arr.fill(nodata) #set all values of raster to missing data value
# reproject polygon to match crs of raster
poly = poly.to_crs(src.crs)
# generator of geom, value pairs to use in rasterizing
shapes = ((geom,value) for geom, value in zip(poly.geometry, poly[field_name]))
#rasterize shapes
burned_value = features.rasterize(shapes=shapes, fill=nodata, out=out_arr, transform=src.transform)
if plot_output == True:
import matplotlib.pyplot as plt
plt_burned_value = burned_value.copy()
plt_burned_value[plt_burned_value==nodata] = np.NaN
plt.imshow(plt_burned_value)
plt.set_cmap("Reds")
plt.colorbar( )
plt.show()
# convert to array
rows, cols = burned_value.shape
data = burned_value.reshape(rows*cols, 1)
# create index
index = pd.RangeIndex(start=0, stop=len(data), step=1, name='pixel_id')
# create wide df with images as columns
df = pd.DataFrame(data=data[:,:],
index=index,
dtype=np.float32,
columns=[field_name])
return df
def mask_df(raster_mask, original_df, missing_value = -9999, reset_index = True):
'''
Reads in raster mask and subsets dataframe by mask index
:param raster_mask: tif containing (0,1) mask where 1's are retained
:param original_df: a path to a pandas dataframe, a series to mask, or a list of 2 dfs
:param missing_value: additional missing values to be masked out
:param reset_index: if true, any df index will be reset (added as columns to df)
:return: masked df
'''
# convert mask to pandas series keep only cells with value 1
index_mask = image_to_series_simple(raster_mask)
index_mask = index_mask[index_mask == 1]
# if original_df is list concatenate by index
if type(original_df) == list:
list_flag = True
first_df_shape = if_series_to_df(original_df[0]).shape
try:
original_df = pd.concat(original_df,
axis=1,
ignore_index=False)
except:
print('time index missing in one element, merging list elements using only pixel_id index')
original_df = [set_df_index(reset_df_index(if_series_to_df(df))) for df in original_df]
original_df = pd.concat(original_df,
axis=1,
ignore_index=False)
original_df = reset_df_index(original_df)
else:
list_flag = False
# check if polygon is already geopandas dataframe if so, don't read again
if not(isinstance(original_df, pd.core.series.Series)) and \
not(isinstance(original_df, pd.core.frame.DataFrame)):
original_df = read_my_df(original_df)
# limit to matching pixels in index from index_mask
try:
original_df = original_df.iloc[original_df.index.get_level_values('pixel_id').isin(index_mask.index)]
except KeyError:
# set multiindex
original_df.set_index(['pixel_id', 'time'], inplace=True)
original_df = original_df.iloc[original_df.index.get_level_values('pixel_id').isin(index_mask.index)]
# remove any more missing values
if missing_value != None:
# inserts nan in missing value locations
try:
original_df = original_df[original_df.iloc[:,:] != missing_value]
except:
original_df = original_df[original_df.iloc[:] != missing_value]
original_df.dropna(inplace=True)
if list_flag == True:
# split back out list elements
a , b = original_df.iloc[:,range(first_df_shape[1])], original_df.iloc[:,first_df_shape[1]:]
# reset index as columns
if reset_index == True:
a = reset_df_index(if_series_to_df(a))
b = reset_df_index(if_series_to_df(b))
return a , b
else:
# reset index as columns
if reset_index == True:
original_df = reset_df_index(if_series_to_df(original_df))
return original_df
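# Editorial usage sketch (file names are assumptions, not from the source):
#   masked = mask_df(raster_mask='aoi_mask.tif',
#                    original_df=image_to_series('path/to/ndvi_tifs'))
#   # keeps only pixels where the mask equals 1 and drops -9999 missing values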
def unmask_df(original_df, mask_df_output):
'''
Unmasks a dataframe with the raster file used for masking
:param original_df: a data frame with the correct unmasked index values
:param mask_df_output: a path to a pandas dataframe or series to mask
:return: unmasked output
'''
# check if df is already dataframe if so, don't read again
if not(isinstance(original_df, pd.core.series.Series)) and \
not(isinstance(original_df, pd.core.frame.DataFrame)):
original_df = read_my_df(original_df)
else:
original_df = original_df
    # convert series to dataframes
original_df = if_series_to_df(original_df)
mask_df_output = if_series_to_df(mask_df_output)
# find common index and set
original_df, mask_df_output = set_common_index(a = original_df,
b = mask_df_output)
# limit original_df to col # of mask_df and change names to match
original_df = original_df.iloc[:,:mask_df_output.shape[1]]
original_df.columns = mask_df_output.columns
original_df['value'] = -9999
try:
# replace values based on masked values, iterate through kind if multiple features
for knd in mask_df_output['kind'].unique():
original_df.update(mask_df_output[mask_df_output['kind']==knd])
except:
# replace values based on masked values for non long form data types
original_df.update(mask_df_output)
return original_df
def unmask_from_mask(mask_df_output, raster_mask, missing_value = -9999):
'''
Unmasks a multiindex dataframe with the raster file used for masking
:param mask_df_output: a path to a pandas dataframe or series to mask with matching (multi)index values
    :param raster_mask: path to a raster mask where 0 values are treated as missing
:param missing_value: value assigned to missing values generally used for writing raster tifs
:return: unmasked output
'''
# set up df with correct index to unmask to
unmask_df = if_series_to_df(image_to_series_simple(raster_mask,dtype = np.float32))
unmask_df[unmask_df.value==0] = missing_value
unmask_df.reset_index(inplace=True)
time_index = mask_df_output.reset_index().time.unique()[0]
unmask_df['time'] = time_index
unmask_df = set_df_mindex(unmask_df)
# add placeholders for unmasked values
for name in mask_df_output.columns:
unmask_df[name] = unmask_df['value']
unmask_df.drop(columns=['value'],inplace=True)
try:
# replace values based on masked values, iterate through kind if multiple features
for knd in mask_df_output['kind'].unique():
unmask_df.update(mask_df_output[mask_df_output['kind']==knd])
except:
# replace values based on masked values for non long form data types
unmask_df.update(mask_df_output)
return unmask_df
def check_mask(raster_mask, raster_input_ex):
'''
Checks that mask and input rasters have identical properties
:param raster_mask: full path and prefix for raster name
    :param raster_input_ex: path to an example input raster to compare against the mask
    :return: None, prints pass/fail for each property check
'''
mask_list = []
ex_list = []
test_list = ['Mask','Resolution','Bounds','Shape']
with rasterio.open(raster_mask) as mask:
mask_list = [mask.crs,mask.res,mask.bounds,mask.shape]
with rasterio.open(raster_input_ex) as ex:
ex_list = [ex.crs,ex.res,ex.bounds, ex.shape]
for i in range(0,len(mask_list)):
if mask_list[i] == ex_list[i]:
print(test_list[i]+": passed")
else:
print(test_list[i]+": FAILED")
# close rasters
mask.close()
ex.close()
def combine_extracted_features(path, write_out=True,index_col=0):
'''
Combines multiple extracted_features.csv files and assigns year prefix
based on subfolder names.
Folder structure assumed as follows:
Precip>
monthly1990-1995>
extracted_features.csv
extracted_features.tif
monthly1996-2000>
extracted_features.csv
extracted_features.tif
:param path: path to parent directory holding folders containing extracted features. (Example: Test)
:param write_out: Should combined df be written to csv
:param index_col: position of index in extracted_features.csv to be combined (default: 0, otherwise use None)
:return: merged df containing all extracted_features.csv data with assigned year prefix
'''
# get paths of all extracted_features.csv files
all_files = [os.path.join(root, name)
for root, dirs, files in os.walk(path)
for name in files
if name.endswith(( "features.csv"))]
# extract numeric values from parent folder name
parent_folder_years = [sub(r'\D', "", parent_folder) for parent_folder in all_files]
print('Combining folder year names',parent_folder_years)
# data read generator add year prefix to all column names REMOVE?
df_from_each_file = (pd.read_csv(all_files[i],index_col= index_col )\
.drop(['time'],errors='ignore', axis=1)\
.add_suffix('-'+parent_folder_years[i]) \
for i in range(len(all_files)))
    # create joined df with all extracted_features data
concatenated_df = pd.concat(df_from_each_file,
axis=1,
ignore_index=False)
# set index to match others
concatenated_df.index.names = ['pixel_id']
# deal with output location
out_path = Path(path).parent.joinpath(Path(path).stem+"_features")
out_path.mkdir(parents=True, exist_ok=True)
# write combined extracted features data
if write_out == True:
concatenated_df.to_csv(os.path.join(out_path,'combined_extracted_features_df.csv'), chunksize=50000, index=False)
return(concatenated_df)
def combine_target_rasters(path, target_file_prefix, dep_var_name ='Y',write_out=True):
'''
    Combines multiple target rasters and assigns a year prefix
    based on the numeric part of each file name.
Folder structure assumed as follows:
Path>
target_2000-2005.tif
target_2006-2010.tif
target_2011-2016.tif
    :param path: path to parent directory holding the target rasters. (Example: Test)
:param target_file_prefix: prefix to search for in path (ex above: "target_")
:param dep_var_name: column name to assign (default: "Y")
:param write_out: Should combined df be written to csv
    :return: merged df containing all target raster data with assigned year prefix
'''
targets = glob.glob(("{}/**/"+target_file_prefix+"*.tif").format(path), recursive=True)
targets_years = [sub(r'\D', "", i) for i in targets]
# rename columns with Y- prefix
series_from_each_file = [ image_to_series_simple(targets[i]).rename('Y-'+targets_years[i])
for i in range(len(targets_years))]
# create joined df with all target data
concatenated_df = pd.concat(series_from_each_file,
axis=1,
ignore_index=False)
# deal with output location
out_path = Path(path).parent.joinpath(Path(path).stem+"_target")
out_path.mkdir(parents=True, exist_ok=True)
# write combined extracted features data
if write_out == True:
print('writing file to ',out_path)
concatenated_df.to_csv(os.path.join(out_path,'combined_target_df.csv'), chunksize=50000, index=False)
return(concatenated_df)
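# Editorial sketch of pandas.wide_to_long, which the helper below relies on
# (toy data, shown only to clarify the reshape):
#   wide = pd.DataFrame({'pixel_id': [0, 1], 'Y-2000': [1, 0], 'Y-2001': [0, 1]})
#   long_df = pd.wide_to_long(wide, stubnames=['Y'], i='pixel_id', j='time', sep='-')
#   # -> MultiIndex (pixel_id, time) with a single 'Y' column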
def wide_to_long_target_features(target,features,sep='-'):
'''
Reads in target and feature data in wide format and returns long format
:param target: target (Y) data wide format multiple years
:param features: attribute (X) data wide format multiple years
:return: target, attribute both in long format
'''
# get variables to convert to long by removing dates at end of name
target_stubs = list(set([sub(sep+r'\d+', "", i) for i in target.columns if i !='pixel_id' ]))
features_stubs = list(set([sub(sep+r'\d+', "", i) for i in features.columns if i !='pixel_id' ]))
target['pixel_id'] = target.index
features['pixel_id'] = features.index
target_ln =
|
pd.wide_to_long(target,i='pixel_id',j="time", stubnames = target_stubs, sep=sep)
|
pandas.wide_to_long
|
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_description
class SampleIdError(RuntimeError):
def __init__(self, sample_id: str, message: str):
self.sample_id = sample_id
self.message = message
class NotNumericSeriesError(RuntimeError):
def __init__(self, message: str):
self.message = message
class UnknowSelectionTypeError(RuntimeError):
def __init__(self, message: str):
self.message = message
class NotInColumnError(RuntimeError):
def __init__(self, message: str):
self.message = message
class GenesRelationError(RuntimeError):
def __init__(self, message: str):
self.message = message
class VariantUndefinedError(RuntimeError):
def __init__(self, message: str):
self.message = message
class ListsUnEqualLengthError(RuntimeError):
def __init__(self, message: str):
self.message = message
class DatetimeFormatError(RuntimeError):
def __init__(self, message: str):
self.message = message
class CDx_Data():
"""[summary]
"""
def __init__(self,
mut_df: pd.DataFrame = None,
cli_df: pd.DataFrame = None,
cnv_df: pd.DataFrame = None,
sv_df: pd.DataFrame = None,
json_str: str = None):
"""Constructor method with DataFrames
Args:
mut_df (pd.DataFrame, optional): SNV and InDel info. Defaults to None.
cli_df (pd.DataFrame, optional): Clinical info. Defaults to None.
cnv_df (pd.DataFrame, optional): CNV info. Defaults to None.
sv_df (pd.DataFrame, optional): SV info. Defaults to None.
"""
self.json_str = json_str
self.mut = mut_df
self.cnv = cnv_df
self.sv = sv_df
if not cli_df is None:
self.cli = cli_df
self.cli = self._infer_datetime_columns()
else:
self._set_cli()
self.crosstab = self.get_crosstab()
def __len__(self):
return 0 if self.cli is None else len(self.cli)
def __getitem__(self, n):
return self.select_by_sample_ids([self.cli.sampleId.iloc[n]])
def __sub__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = None if self.cli is None and cdx.cli is None else pd.concat(
[self.cli, cdx.cli]).drop_duplicates(keep=False)
mut = None if self.mut is None and cdx.mut is None else pd.concat(
[self.mut, cdx.mut]).drop_duplicates(keep=False)
cnv = None if self.cnv is None and cdx.cnv is None else pd.concat(
[self.cnv, cdx.cnv]).drop_duplicates(keep=False)
sv = None if self.sv is None and cdx.sv is None else pd.concat(
[self.sv, cdx.sv]).drop_duplicates(keep=False)
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
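    # Editorial note (not from the original source): because each piece is built with
    # pd.concat(...).drop_duplicates(keep=False), subtraction keeps only rows present in
    # exactly one of the two objects, i.e. a symmetric difference rather than a strict
    # "remove b from a".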
def __add__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = pd.concat([self.cli, cdx.cli]).drop_duplicates()
mut = pd.concat([self.mut, cdx.mut]).drop_duplicates()
cnv =
|
pd.concat([self.cnv, cdx.cnv])
|
pandas.concat
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import torch
from caffe2.python import workspace
from ml.rl.caffe_utils import C2, StackedAssociativeArray
from ml.rl.preprocessing import normalization
from ml.rl.preprocessing.normalization import MISSING_VALUE
logger = logging.getLogger(__name__)
class SparseToDenseProcessor:
def __init__(
self, sorted_features: List[int], set_missing_value_to_zero: bool = False
):
self.sorted_features = sorted_features
self.set_missing_value_to_zero = set_missing_value_to_zero
def __call__(self, sparse_data):
return self.process(sparse_data)
class Caffe2SparseToDenseProcessor(SparseToDenseProcessor):
def __init__(
self, sorted_features: List[int], set_missing_value_to_zero: bool = False
):
super().__init__(sorted_features, set_missing_value_to_zero)
def process(
self, sparse_data: StackedAssociativeArray
) -> Tuple[str, str, List[str]]:
lengths_blob = sparse_data.lengths
keys_blob = sparse_data.keys
values_blob = sparse_data.values
MISSING_SCALAR = C2.NextBlob("MISSING_SCALAR")
missing_value = 0.0 if self.set_missing_value_to_zero else MISSING_VALUE
workspace.FeedBlob(MISSING_SCALAR, np.array([missing_value], dtype=np.float32))
C2.net().GivenTensorFill([], [MISSING_SCALAR], shape=[], values=[missing_value])
parameters: List[str] = [MISSING_SCALAR]
assert len(self.sorted_features) > 0, "Sorted features is empty"
dense_input = C2.NextBlob("dense_input")
dense_input_presence = C2.NextBlob("dense_input_presence")
C2.net().SparseToDenseMask(
[keys_blob, values_blob, MISSING_SCALAR, lengths_blob],
[dense_input, dense_input_presence],
mask=self.sorted_features,
return_presence_mask=True,
)
if self.set_missing_value_to_zero:
dense_input_presence = C2.And(
C2.GT(dense_input, -1e-4, broadcast=1),
C2.LT(dense_input, 1e-4, broadcast=1),
)
return dense_input, dense_input_presence, parameters
class PandasSparseToDenseProcessor(SparseToDenseProcessor):
def __init__(
self, sorted_features: List[int], set_missing_value_to_zero: bool = False
):
super().__init__(sorted_features, set_missing_value_to_zero)
def process(self, sparse_data) -> Tuple[torch.Tensor, torch.Tensor]:
missing_value = normalization.MISSING_VALUE
if self.set_missing_value_to_zero:
missing_value = 0.0
state_features_df =
|
pd.DataFrame(sparse_data)
|
pandas.DataFrame
|
import logging
from operator import itemgetter
from logging.config import dictConfig
from datetime import datetime, timedelta, date
from math import ceil
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
from chinese_calendar import get_holidays
import plotly.graph_objects as go
import numpy as np
from keysersoze.models import (
Deal,
Asset,
AssetMarketHistory,
)
from keysersoze.utils import (
get_accounts_history,
get_accounts_summary,
)
from keysersoze.apps.app import APP
from keysersoze.apps.utils import make_card_component
LOGGER = logging.getLogger(__name__)
dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(filename)s:%(lineno)s: %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"stream": "ext://sys.stdout",
},
},
'loggers': {
'__main__': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'keysersoze': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
pd.options.mode.chained_assignment = 'raise'
COLUMN_MAPPINGS = {
'code': '代码',
'name': '名称',
'ratio': '占比',
'return_rate': '收益率',
'cost': '投入',
'avg_cost': '成本',
'price': '价格',
'price_date': '价格日期',
'amount': '份额',
'money': '金额',
'return': '收益',
'action': '操作',
'account': '账户',
'date': '日期',
'time': '时间',
'fee': '费用',
'position': '仓位',
'day_return': '日收益',
}
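# Editorial note (assumption about intended use, not confirmed by this file):
# COLUMN_MAPPINGS is presumably applied before rendering tables, e.g.
#   df = df.rename(columns=COLUMN_MAPPINGS)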
FORMATS = {
'价格日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'时间': {'type': 'datetime', 'format': Format(nully='N/A')},
'占比': {'type': 'numeric', 'format': Format(scheme='%', precision=2)},
'收益率': {'type': 'numeric', 'format': Format(nully='N/A', scheme='%', precision=2)},
'份额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'金额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'费用': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'投入': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'成本': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'价格': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'收益': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
}
ACCOUNT_PRIORITIES = {
'长期投资': 0,
'长赢定投': 1,
'U定投': 2,
'投资实证': 3,
'稳健投资': 4,
'证券账户': 6,
'蛋卷基金': 7,
}
all_accounts = [deal.account for deal in Deal.select(Deal.account).distinct()]
all_accounts.sort(key=lambda name: ACCOUNT_PRIORITIES.get(name, 1000))
layout = html.Div(
[
dcc.Store(id='assets'),
dcc.Store(id='stats'),
dcc.Store(id='accounts_history'),
dcc.Store(id='index_history'),
dcc.Store(id='deals'),
dcc.Store(id='start-date'),
dcc.Store(id='end-date'),
html.H3('投资账户概览'),
dbc.Checklist(
id='show-money',
options=[{'label': '显示金额', 'value': 'show'}],
value=[],
switch=True,
),
html.Hr(),
dbc.InputGroup(
[
dbc.InputGroupAddon('选择账户', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='checklist',
options=[{'label': a, 'value': a} for a in all_accounts],
value=[all_accounts[0]],
inline=True,
className='my-auto'
),
],
className='my-2',
),
html.Div(id='account-summary'),
html.Br(),
dbc.Tabs([
dbc.Tab(
label='资产走势',
children=[
dcc.Graph(
id='asset-history-chart',
config={
'displayModeBar': False,
}
),
]
),
dbc.Tab(
label='累计收益走势',
children=[
dcc.Graph(
id="total-return-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='累计收益率走势',
children=[
dbc.InputGroup(
[
dbc.InputGroupAddon('比较基准', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='compare',
options=[
{'label': '中证全指', 'value': '000985.CSI'},
{'label': '上证指数', 'value': '000001.SH'},
{'label': '深证成指', 'value': '399001.SZ'},
{'label': '沪深300', 'value': '000300.SH'},
{'label': '中证500', 'value': '000905.SH'},
],
value=['000985.CSI'],
inline=True,
className='my-auto'
),
],
className='my-2',
),
dcc.Graph(
id="return-curve-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='日收益历史',
children=[
dcc.Graph(
id="day-return-chart",
config={
'displayModeBar': False
},
),
]
),
]),
html.Center(
[
dbc.RadioItems(
id="date-range",
className='btn-group',
labelClassName='btn btn-light border',
labelCheckedClassName='active',
options=[
{"label": "近一月", "value": "1m"},
{"label": "近三月", "value": "3m"},
{"label": "近半年", "value": "6m"},
{"label": "近一年", "value": "12m"},
{"label": "今年以来", "value": "thisyear"},
{"label": "本月", "value": "thismonth"},
{"label": "本周", "value": "thisweek"},
{"label": "所有", "value": "all"},
{"label": "自定义", "value": "customized"},
],
value="thisyear",
),
],
className='radio-group',
),
html.Div(
id='customized-date-range-container',
children=[
dcc.RangeSlider(
id='customized-date-range',
min=2018,
max=2022,
step=None,
marks={year: str(year) for year in range(2018, 2023)},
value=[2018, 2022],
)
],
className='my-auto ml-0 mr-0',
style={'max-width': '100%', 'display': 'none'}
),
html.Hr(),
dbc.Tabs([
dbc.Tab(
label='持仓明细',
children=[
html.Br(),
dbc.Checklist(
id='show-cleared',
options=[{'label': '显示清仓品种', 'value': 'show'}],
value=[],
switch=True,
),
html.Div(id='assets_cards'),
html.Center(
[
dbc.RadioItems(
id="assets-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
dbc.Tab(
label='交易记录',
children=[
html.Br(),
html.Div(id='deals_table'),
html.Center(
[
dbc.RadioItems(
id="deals-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
])
],
)
@APP.callback(
[
dash.dependencies.Output('assets', 'data'),
dash.dependencies.Output('stats', 'data'),
dash.dependencies.Output('accounts_history', 'data'),
dash.dependencies.Output('index_history', 'data'),
dash.dependencies.Output('deals', 'data'),
dash.dependencies.Output('deals-pagination', 'options'),
dash.dependencies.Output('assets-pagination', 'options'),
],
[
dash.dependencies.Input('checklist', 'value'),
dash.dependencies.Input('compare', 'value'),
],
)
def update_after_check(accounts, index_codes):
accounts = accounts or all_accounts
summary_data, assets_data = get_accounts_summary(accounts)
history = get_accounts_history(accounts).to_dict('records')
history.sort(key=itemgetter('account', 'date'))
index_history = []
for index_code in index_codes:
index = Asset.get(zs_code=index_code)
for record in index.history:
index_history.append({
'account': index.name,
'date': record.date,
'price': record.close_price
})
index_history.sort(key=itemgetter('account', 'date'))
deals = []
for record in Deal.get_deals(accounts):
deals.append({
'account': record.account,
'time': record.time,
'code': record.asset.zs_code,
'name': record.asset.name,
'action': record.action,
'amount': record.amount,
'price': record.price,
'money': record.money,
'fee': record.fee,
})
deals.sort(key=itemgetter('time'), reverse=True)
valid_deals_count = 0
for item in deals:
if item['action'] == 'fix_cash':
continue
if item['code'] == 'CASH' and item['action'] == 'reinvest':
continue
valid_deals_count += 1
pagination_options = [
{'label': idx + 1, 'value': idx}
for idx in range(ceil(valid_deals_count / 100))
]
assets_pagination_options = []
return (
assets_data,
summary_data,
history,
index_history,
deals,
pagination_options,
assets_pagination_options
)
@APP.callback(
dash.dependencies.Output('account-summary', 'children'),
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_summary(stats, show_money):
body_content = []
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '总资产',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['money'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '日收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['day_return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['day_return_rate'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '累计收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['return_rate'] if stats['amount'] > 0 else 'N/A(已清仓)',
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '年化收益率',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['annualized_return'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True,
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '现金',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['cash'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '仓位',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['position'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto',
color='primary',
)
return [card]
@APP.callback(
dash.dependencies.Output('assets_cards', 'children'),
[
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('show-cleared', 'value'),
]
)
def update_assets_table(assets_data, show_money, show_cleared):
cards = [html.Hr()]
for row in assets_data:
if not show_cleared and abs(row['amount']) <= 0.001:
continue
if row["code"] in ('CASH', 'WZZNCK'):
continue
cards.append(make_asset_card(row, show_money))
cards.append(html.Br())
return cards
def make_asset_card(asset_info, show_money=True):
def get_color(value):
if not isinstance(value, (float, int)):
return None
if value > 0:
return 'text-danger'
if value < 0:
return 'text-success'
return None
header = dbc.CardHeader([
html.H5(
html.A(
f'{asset_info["name"]}({asset_info["code"]})',
href=f'/asset/{asset_info["code"].replace(".", "").lower()}',
target='_blank'
),
className='mb-0'
),
html.P(f'更新日期 {asset_info["price_date"]}', className='mb-0'),
])
body_content = []
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '持有金额/份额'},
{'item_cls': html.H4, 'type': 'money', 'content': asset_info['money']},
{'item_cls': html.P, 'type': 'amount', 'content': asset_info['amount']}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '日收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['day_return'],
'color': get_color(asset_info['day_return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['day_return_rate'],
'color': get_color(asset_info['day_return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '现价/成本'},
{'item_cls': html.H4, 'type': 'price', 'content': asset_info['price']},
{'item_cls': html.P, 'type': 'price', 'content': asset_info['avg_cost'] or 'N/A'}
],
show_money=show_money,
)
)
asset = Asset.get(zs_code=asset_info['code'])
prices = []
for item in asset.history.order_by(AssetMarketHistory.date.desc()).limit(10):
if item.close_price is not None:
prices.append({
'date': item.date,
'price': item.close_price,
})
else:
prices.append({
'date': item.date,
'price': item.nav,
})
if len(prices) >= 10:
break
prices.sort(key=itemgetter('date'))
df = pd.DataFrame(prices)
df['date'] = pd.to_datetime(df['date'])
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['price'],
showlegend=False,
marker={'color': 'orange'},
mode='lines+markers',
)
)
fig.update_layout(
width=150,
height=100,
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
yaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
)
fig.update_xaxes(
rangebreaks=[
{'bounds': ["sat", "mon"]},
{
'values': get_holidays(df.date.min(), df.date.max(), False)
}
]
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '十日走势'},
{
'item_cls': None,
'type': 'figure',
'content': fig
}
],
show_money=show_money
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '累计收益'},
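# '累计收益' = "cumulative return"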
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['return'],
'color': get_color(asset_info['return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['return_rate'],
'color': get_color(asset_info['return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '占比'},
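# '占比' = "share of total portfolio"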
{'item_cls': html.H4, 'type': 'percent', 'content': asset_info['position']},
],
show_money=show_money,
)
)
card = dbc.Card(
[
header,
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('return-curve-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('index_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_return_chart(accounts_history, index_history, start_date, end_date):
df =
|
pd.DataFrame(accounts_history)
|
pandas.DataFrame
|
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.0))]:
assert s.index.is_floating()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10})
tm.assert_frame_equal(expected, df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df.loc[df.index[:2]] = 1
expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index)
|
tm.assert_frame_equal(expected, df)
|
pandas._testing.assert_frame_equal
|
import os
import re
import scrapy
import pandas as pd
def parse_page(filename):
with open(filename, 'r') as file:
selector = scrapy.Selector(text=file.read())
# Get name.
name = selector.css('div#coreInfo > h1::text').get()
# Get all sections separately.
journals = get_citations_for_section(selector, 'journal-articles')
books = get_citations_for_section(selector, 'books')
chapters = get_citations_for_section(selector, 'chapters')
conferences = get_citations_for_section(selector, 'conferences')
scholarly = get_citations_for_section(selector, 'scholarly-editions')
posters = get_citations_for_section(selector, 'posters')
others = get_citations_for_section(selector, 'others')
# Make and populate the DF.
person_df =
|
pd.DataFrame(columns=['Name', 'Type', 'Citation'])
|
pandas.DataFrame
|
import math
import timeit
import networkx as nx
import numpy as np
import pandas as pd
from scipy import sparse
from tqdm import tqdm
from tiedecay.dataset import Dataset
class TieDecayNetwork(object):
"""
Object representing the tie strengths between nodes
in the network.
The user can use this class to find the tie strength
over any given window of time contained in the dataset.
Args:
dataset (Dataset): dataset object
alpha (float): tie-decay parameter
"""
def __init__(self, dataset: Dataset, alpha: float):
assert type(dataset) is Dataset, "Invalid type for dataset."
self.dataset = dataset
self.alpha = alpha
self.history_loaded = False
# threshold below which to filter tie strength to 0
self.threshold = 1e-7
return
def compute_from_dataset(self, t: str, t_start: str = None) -> nx.DiGraph:
"""
Compute the tie decay values over a given time window,
using the dataset
Args:
t (str): time as a string that can be converted to pd.Datetime
t_start (str): start time as a string that can be converted to pd.Datetime
- if not provided, the initial time in the dataset will be used
Returns:
B (nx.DiGraph): graph with tie strengths as edge weights
"""
if t_start is not None:
t_start = pd.to_datetime(t_start)
assert t_start >= pd.to_datetime(
self.dataset.t_first
), f"t_start: {t_start} must be after {pd.to_datetime(self.dataset.t_first)}"
assert t_start < pd.to_datetime(
self.dataset.t_last
), f"t_start: {t_start} must be before {pd.to_datetime(self.dataset.t_last)}"
else:
t_start = pd.to_datetime(self.dataset.t_first)
t = pd.to_datetime(t)
assert t >= t_start, f"Time t: {t} is before t_start: {t_start}"
df = pd.DataFrame(self.dataset.adj_list)
df.columns = ["source", "target", "time"]
df.time = pd.to_datetime(df.time)
df = df[df.time <= t]
B = self._get_decay_graph(df, t)
self.history_loaded = True
self.B = B
return B
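# Illustrative usage sketch (not part of the original module; the Dataset
# construction and the dates below are assumptions):
#     dataset = Dataset(...)   # hypothetical source of (source, target, time) events
#     tdn = TieDecayNetwork(dataset, alpha=1e-5)
#     B = tdn.compute_from_dataset('2020-06-01', t_start='2020-01-01')
#     B.edges(data='weight')   # decayed tie strengths as of 2020-06-01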
def _get_decay_graph(self, df: pd.DataFrame, t: pd.Timestamp) -> nx.DiGraph:
"""
Get the TieDecay matrix B(t) using a dataframe
Args:
df (pd.DataFrame): dataframe with 'source', 'target', 'time'
t (pd.Timestamp): timestamp at which to evaluate the td values
Returns:
B (nx.DiGraph): graph with tie strengths as edge weights
"""
# get tie strength for each interaction
df["weight"] = df.apply(
lambda x: math.exp(-self.alpha * (t - x.time).total_seconds()), axis=1
)
# zero out small values
df.weight = df.weight.mask(df.weight < self.threshold, 0)
# sum across each pair of nodes
td_df = df.groupby(["source", "target"]).sum().reset_index()
td_df["weight"] = td_df["weight"]
# construct graph
B = nx.from_pandas_edgelist(
td_df,
source="source",
target="target",
edge_attr="weight",
create_using=nx.DiGraph(),
)
return B
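# Worked example of the decay above (values chosen for illustration only):
# with alpha = 1e-5, an interaction observed one day (86400 s) before t
# contributes exp(-1e-5 * 86400) ≈ 0.42 to its (source, target) edge weight;
# contributions below self.threshold (1e-7) are zeroed before the group-by sum.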
def compute_centrality_trajectories_from_dataset(
self, number_of_samples: int, centrality_method: str
) -> pd.DataFrame:
"""
Sample tie-decay PageRank values from the dataset at a given resolution
(number of samples).
Args:
number_of_samples (int): number of time points at which to evaluate
tie strengths
centrality_method (str): supported options:
- pagerank
Returns:
centrality_df (pd.DataFrame): dataframe with node indices as the df index,
and centrality values at each sampled
time point
sampled_times (pandas.core.indexes.datetimes.DatetimeIndex): the timestamps that were sampled
"""
total_seconds = (
pd.to_datetime(self.dataset.t_last) - pd.to_datetime(self.dataset.t_first)
).total_seconds()
seconds_per_sample = int(total_seconds / number_of_samples)
sampling_range = pd.date_range(
start=self.dataset.t_first,
end=self.dataset.t_last,
freq=str(seconds_per_sample) + "s",
)
df =
|
pd.DataFrame(self.dataset.adj_list)
|
pandas.DataFrame
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from numpy import select
from numpy.random import choice
from urllib.parse import quote
import json
import pandas as pd
import plotly.graph_objs as go
import plotly.plotly as py
from Credentials import credentials
from spotifyScrape import spotifyScrape
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
songs_df = pd.read_csv('all_songs.csv')
artist_averages =
|
pd.read_csv('artist_averages.csv')
|
pandas.read_csv
|
import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from dask import dataframe as dd
from distributed.utils_test import cluster
from tqdm import tqdm
import featuretools as ft
from featuretools import EntitySet, Timedelta, calculate_feature_matrix, dfs
from featuretools.computational_backends import utils
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
_chunk_dataframe_groups,
_handle_chunk_size,
scatter_warning
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
n_jobs_to_workers
)
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature
)
from featuretools.primitives import (
Count,
Max,
Min,
Percentile,
Sum,
TransformPrimitive
)
from featuretools.tests.testing_utils import (
backward_path,
get_mock_client_cluster,
to_pandas
)
from featuretools.utils.gen_utils import Library, import_or_none
ks = import_or_none('databricks.koalas')
def test_scatter_warning(caplog):
logger = logging.getLogger('featuretools')
match = "EntitySet was only scattered to {} out of {} workers"
warning_message = match.format(1, 2)
logger.propagate = True
scatter_warning(1, 2)
logger.propagate = False
assert warning_message in caplog.text
# TODO: final assert fails w/ Dask
def test_calc_feature_matrix(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed dataframe result not ordered')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times, es['log'].index: instances})
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
verbose=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
error_text = 'features must be a non-empty list of features'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix('features', es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([], es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([1, 2, 3], es, cutoff_time=cutoff_time)
error_text = "cutoff_time times must be datetime type: try casting via "\
"pd\\.to_datetime\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=17)
error_text = 'cutoff_time must be a single value or DataFrame'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=times)
cutoff_times_dup = pd.DataFrame({'time': [datetime(2018, 3, 1),
datetime(2018, 3, 1)],
es['log'].index: [1, 1]})
error_text = 'Duplicated rows in cutoff time dataframe.'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
entityset=es,
cutoff_time=cutoff_times_dup)
cutoff_reordered = cutoff_time.iloc[[-1, 10, 1]] # 3 ids not ordered by cutoff time
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_reordered,
verbose=True)
assert all(feature_matrix.index == cutoff_reordered["id"].values)
# fails with Dask and Koalas entitysets: the cutoff time is not reordered, so out-of-order
# results cannot be verified
# - can't tell whether values are wrong or merely reordered because all are False, so a
#   positional check isn't possible
def test_cfm_warns_dask_cutoff_time(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times,
es['log'].index: instances})
cutoff_time = dd.from_pandas(cutoff_time, npartitions=4)
property_feature = ft.Feature(es['log']['value']) > 10
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_cfm_compose(es, lt):
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
verbose=True)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_compose_approximate(es, lt):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('dask does not support approximate')
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
approximate='1s',
verbose=True)
assert(type(feature_matrix) == pd.core.frame.DataFrame)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_dask_compose(dask_es, lt):
property_feature = ft.Feature(dask_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
dask_es,
cutoff_time=lt,
verbose=True)
feature_matrix = feature_matrix.compute()
assert (feature_matrix[property_feature.get_name()] == feature_matrix['label_func']).values.all()
# tests approximate, skip for dask/koalas
def test_cfm_approximate_correct_ordering():
trips = {
'trip_id': [i for i in range(1000)],
'flight_time': [datetime(1998, 4, 2) for i in range(350)] + [datetime(1997, 4, 3) for i in range(650)],
'flight_id': [randint(1, 25) for i in range(1000)],
'trip_duration': [randint(1, 999) for i in range(1000)]
}
df = pd.DataFrame.from_dict(trips)
es = EntitySet('flights')
es.entity_from_dataframe("trips",
dataframe=df,
index="trip_id",
time_index='flight_time')
es.normalize_entity(base_entity_id="trips",
new_entity_id="flights",
index="flight_id",
make_time_index=True)
features = dfs(entityset=es, target_entity='trips', features_only=True)
flight_features = [feature for feature in features
if isinstance(feature, DirectFeature) and
isinstance(feature.base_features[0],
AggregationFeature)]
property_feature = IdentityFeature(es['trips']['trip_id'])
cutoff_time = pd.DataFrame.from_dict({'instance_id': df['trip_id'],
'time': df['flight_time']})
time_feature = IdentityFeature(es['trips']['flight_time'])
feature_matrix = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix.index.names = ['instance', 'time']
assert(np.all(feature_matrix.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix[['trip_id', 'flight_time']].values))
feature_matrix_2 = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
approximate=Timedelta(2, 'd'))
feature_matrix_2.index.names = ['instance', 'time']
assert(np.all(feature_matrix_2.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix_2[['trip_id', 'flight_time']].values))
for column in feature_matrix:
for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# uses approximate, skip for dask/koalas entitysets
def test_cfm_no_cutoff_time_index(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat4 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat4, pd_es['sessions'])
cutoff_time = pd.DataFrame({
'time': [datetime(2013, 4, 9, 10, 31, 19), datetime(2013, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(12, 's'),
cutoff_time=cutoff_time)
assert feature_matrix.index.name == 'id'
assert feature_matrix.index.values.tolist() == [0, 2]
assert feature_matrix[dfeat.get_name()].tolist() == [10, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
cutoff_time = pd.DataFrame({
'time': [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix_2 = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix_2.index.name == 'id'
assert feature_matrix_2.index.tolist() == [0, 2]
assert feature_matrix_2[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix_2[agg_feat.get_name()].tolist() == [5, 1]
# TODO: fails with dask entitysets
# TODO: fails with koalas entitysets
def test_cfm_duplicated_index_in_cutoff_time(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed results not ordered, missing duplicates')
times = [datetime(2011, 4, 1), datetime(2011, 5, 1),
datetime(2011, 4, 1), datetime(2011, 5, 1)]
instances = [1, 1, 2, 2]
property_feature = ft.Feature(es['log']['value']) > 10
cutoff_time = pd.DataFrame({'id': instances, 'time': times},
index=[1, 1, 1, 1])
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
chunk_size=1)
assert (feature_matrix.shape[0] == cutoff_time.shape[0])
# TODO: fails with Dask, Koalas
def test_saveprogress(es, tmpdir):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('saveprogress fails with distributed entitysets')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = ft.Feature(es['log']['value']) > 10
save_progress = str(tmpdir)
fm_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
save_progress=save_progress)
_, _, files = next(os.walk(save_progress))
files = [os.path.join(save_progress, file) for file in files]
# there are 17 datetime files created above
assert len(files) == 17
list_df = []
for file_ in files:
df = pd.read_csv(file_, index_col="id", header=0)
list_df.append(df)
merged_df = pd.concat(list_df)
merged_df.set_index(pd.DatetimeIndex(times), inplace=True, append=True)
fm_no_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
assert np.all((merged_df.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (merged_df.sort_index().values))
shutil.rmtree(save_progress)
def test_cutoff_time_correctly(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
labels = [10, 5, 0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_binning():
cutoff_time = pd.DataFrame({
'time': [
datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1)
],
'instance_id': [1, 2, 3]
})
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(4, 'h'))
labels = [datetime(2011, 4, 9, 12),
datetime(2011, 4, 10, 8),
datetime(2011, 4, 10, 12)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(25, 'h'))
labels = [datetime(2011, 4, 8, 22),
datetime(2011, 4, 9, 23),
datetime(2011, 4, 9, 23)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
error_text = "Unit is relative"
with pytest.raises(ValueError, match=error_text):
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(1, 'mo'))
def test_training_window_fails_dask(dask_es):
property_feature = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['customers'],
primitive=Count)
error_text = "Using training_window is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([property_feature],
dask_es,
training_window='2 hours')
def test_cutoff_time_columns_order(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
id_col_names = ['instance_id', es['customers'].index]
time_col_names = ['time', es['customers'].time_index]
for id_col in id_col_names:
for time_col in time_col_names:
cutoff_time = pd.DataFrame({'dummy_col_1': [1, 2, 3],
id_col: [0, 1, 2],
'dummy_col_2': [True, False, False],
time_col: times})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
labels = [10, 5, 0]
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_df_redundant_column_names(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({es['customers'].index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column' \
' with the same name as the target entity index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
cutoff_time = pd.DataFrame({es['customers'].time_index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "time" and a column' \
' with the same name as the target entity time index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_training_window(pd_es):
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
# include a feature that is a direct feature of a higher-level agg,
# so we have multiple "filter eids" in get_pandas_data_slice
# and go through the loop that pulls data with a training_window param more than once
dagg = DirectFeature(top_level_agg, pd_es['customers'])
# for now, warns if last_time_index not present
times = [datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
warn_text = "Using training_window but last_time_index is not set on entity customers"
with pytest.warns(UserWarning, match=warn_text):
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours')
pd_es.add_last_time_indexes()
error_text = 'Training window cannot be in observations'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
pd_es,
cutoff_time=cutoff_time,
training_window=Timedelta(2, 'observations'))
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True)
prop_values = [4, 5, 1]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False)
prop_values = [5, 5, 2]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case3. include_cutoff_time = False with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-09 10:40:00"),
training_window='9 minutes',
include_cutoff_time=False)
prop_values = [0, 4, 0]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case4. include_cutoff_time = True with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-10 10:40:00"),
training_window='2 days',
include_cutoff_time=True)
prop_values = [0, 10, 1]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
def test_training_window_overlap(pd_es):
pd_es.add_last_time_indexes()
count_log = ft.Feature(
base=pd_es['log']['id'],
parent_entity=pd_es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:40:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [1, 9])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [0, 9])
def test_include_cutoff_time_without_training_window(es):
es.add_last_time_indexes()
count_log = ft.Feature(
base=es['log']['id'],
parent_entity=es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:31:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [1, 6])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [0, 5])
# Case3. include_cutoff_time = True with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [6])
# Case4. include_cutoff_time = False with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [5])
def test_approximate_dfeat_of_agg_on_target_include_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_time = pd.DataFrame({'time': [datetime(2011, 4, 9, 10, 31, 19)], 'instance_id': [0]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat2, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=False)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# excluded due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [5]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=True)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# included due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [6]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
def test_training_window_recent_time_index(pd_es):
# customer with no sessions
row = {
'id': [3],
'age': [73],
u'région_id': ['United States'],
'cohort': [1],
'cancel_reason': ["Lost interest"],
'loves_ice_cream': [True],
'favorite_quote': ["Don't look back. Something might be gaining on you."],
'signup_date': [datetime(2011, 4, 10)],
'upgrade_date': [datetime(2011, 4, 12)],
'cancel_date': [datetime(2011, 5, 13)],
'date_of_birth': [datetime(1938, 2, 1)],
'engagement_level': [2],
}
to_add_df = pd.DataFrame(row)
to_add_df.index = range(3, 4)
# have to convert category to int in order to concat
old_df = pd_es['customers'].df
old_df.index = old_df.index.astype("int")
old_df["id"] = old_df["id"].astype(int)
df = pd.concat([old_df, to_add_df], sort=True)
# convert back after
df.index = df.index.astype("category")
df["id"] = df["id"].astype("category")
pd_es['customers'].update_data(df=df, recalculate_last_time_indexes=False)
pd_es.add_last_time_indexes()
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dagg = DirectFeature(top_level_agg, pd_es['customers'])
instance_ids = [0, 1, 2, 3]
times = [datetime(2011, 4, 9, 12, 31), datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1), datetime(2011, 4, 10, 1, 59, 59)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': instance_ids})
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True,
)
prop_values = [4, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False,
)
prop_values = [5, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# TODO: add test to fail w/ koalas
def test_approximate_fails_dask(dask_es):
agg_feat = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['sessions'],
primitive=Count)
error_text = "Using approximate is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([agg_feat],
dask_es,
approximate=Timedelta(1, 'week'))
def test_approximate_multiple_instances_per_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix.shape[0] == 2
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_with_multiple_paths(pd_diamond_es):
pd_es = pd_diamond_es
path = backward_path(pd_es, ['regions', 'customers', 'transactions'])
agg_feat = ft.AggregationFeature(pd_es['transactions']['id'],
parent_entity=pd_es['regions'],
relationship_path=path,
primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [6, 2]
def test_approximate_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
instance_ids=[0, 2],
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_dfeat_of_need_all_values(pd_es):
p = ft.Feature(pd_es['log']['value'], primitive=Percentile)
agg_feat = ft.Feature(p, parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
log_df = pd_es['log'].df
instances = [0, 2]
cutoffs = [pd.Timestamp('2011-04-09 10:31:19'), pd.Timestamp('2011-04-09 11:00:00')]
approxes = [pd.Timestamp('2011-04-09 10:31:10'), pd.Timestamp('2011-04-09 11:00:00')]
true_vals = []
true_vals_approx = []
for instance, cutoff, approx in zip(instances, cutoffs, approxes):
log_data_cutoff = log_df[log_df['datetime'] < cutoff]
log_data_cutoff['percentile'] = log_data_cutoff['value'].rank(pct=True)
true_agg = log_data_cutoff.loc[log_data_cutoff['session_id'] == instance, 'percentile'].fillna(0).sum()
true_vals.append(round(true_agg, 3))
log_data_approx = log_df[log_df['datetime'] < approx]
log_data_approx['percentile'] = log_data_approx['value'].rank(pct=True)
true_agg_approx = log_data_approx.loc[log_data_approx['session_id'].isin([0, 1, 2]), 'percentile'].fillna(0).sum()
true_vals_approx.append(round(true_agg_approx, 3))
lapprox = [round(x, 3) for x in feature_matrix[dfeat.get_name()].tolist()]
test_list = [round(x, 3) for x in feature_matrix[agg_feat.get_name()].tolist()]
assert lapprox == true_vals_approx
assert test_list == true_vals
def test_uses_full_entity_feat_of_approximate(pd_es):
agg_feat = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
agg_feat3 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Max)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
dfeat2 = DirectFeature(agg_feat3, pd_es['sessions'])
p = ft.Feature(dfeat, primitive=Percentile)
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# only dfeat2 should be approximated
# because Percentile needs all values
feature_matrix_only_dfeat2 = calculate_feature_matrix(
[dfeat2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == [50, 50]
feature_matrix_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == feature_matrix_approx[dfeat2.get_name()].tolist()
feature_matrix_small_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 'ms'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix_no_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
for f in [p, dfeat, agg_feat]:
for fm1, fm2 in combinations([feature_matrix_approx,
feature_matrix_small_approx,
feature_matrix_no_approx], 2):
assert fm1[f.get_name()].tolist() == fm2[f.get_name()].tolist()
def test_approximate_dfeat_of_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(ft.Feature(agg_feat2, pd_es["sessions"]), pd_es['log'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time =
|
pd.DataFrame({'time': times, 'instance_id': [0, 2]})
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 13:38:01 2020
@author: <NAME>
Produces the total mass of CO2 flux and new production for different regional boxes.
Requires:
'processed/seamask.nc'
'datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'
'processed/flux/fratios.nc'
'processed/flux/avg_npp_rg_cbpm.nc'
'processed/flux/avg_npp_rg_cafe.nc'
'processed/earth_m2.nc'
Produces:
processed/results/enso_basin_means.csv
processed/results/carbon_mass.csv
figs/Figure6_basinavg_pG.png
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from carbon_math import *
import matplotlib
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator, ScalarFormatter)
class FixedOrderFormatter(ScalarFormatter):
"""Formats axis ticks using scientific notation with a constant order of
magnitude
https://stackoverflow.com/a/3679918/9965678
"""
def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
self._order_of_mag = order_of_mag
ScalarFormatter.__init__(self, useOffset=useOffset,
useMathText=useMathText)
def _set_orderOfMagnitude(self, range):
"""Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
self.orderOfMagnitude = self._order_of_mag
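# Usage note: FixedOrderFormatter(15) is attached to the y-axis further below via
# ax.yaxis.set_major_formatter(...), so tick labels are rendered as multiples of
# 10^15 gC (i.e. PgC) instead of letting matplotlib pick the order of magnitude.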
def trends(ax,x,y,c='r'):
from scipy.stats import linregress
mean=np.nanmean(y)
std=np.nanstd(y)*1
#x_n=np.arange(0,len(x))
# x1=np.arange(np.datetime64(x[0],'M'),np.datetime64(x[-1],'M')+np.timedelta64(1,'M'))
#x1=trd.index.values.astype('datetime64[D]')
x1=x.astype('datetime64[D]')
x_n=pd.to_numeric(x1)
slope, intercept, r_value, p_value, std_err = linregress(x_n,y)
mn=min(x_n)
mx=max(x_n)
x1=np.linspace(mn,mx,len(x))
y1=slope*x1+intercept
ax.plot(pd.to_datetime(x),y1,':'+c,linewidth=2.5)
#ax.text(x1[-1]-(x1[-1]*0.1),y1[-1]-(y1[-1]*0.1),'R2='+str(np.round(r_value**2,3)))
return slope, intercept, r_value,p_value,std_err
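# Note: trends() also overlays the dotted fit on `ax` and returns the raw
# linregress statistics; the script treats the slope as a per-day rate (x is
# floored to whole days before the fit) and converts it to an annual rate with
# `slope * 365` further below.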
def justtrends(x,y,c='r'):
from scipy.stats import linregress
mean=np.nanmean(y)
std=np.nanstd(y)*1
#x_n=np.arange(0,len(x))
# x1=np.arange(np.datetime64(x[0],'M'),np.datetime64(x[-1],'M')+np.timedelta64(1,'M'))
#x1=trd.index.values.astype('datetime64[D]')
x1=x.astype('datetime64[D]')
x_n=pd.to_numeric(x1)
slope, intercept, r_value, p_value, std_err = linregress(x_n,y)
mn=min(x_n)
mx=max(x_n)
x1=np.linspace(mn,mx,len(x))
y1=slope*x1+intercept
#ax.text(x1[-1]-(x1[-1]*0.1),y1[-1]-(y1[-1]*0.1),'R2='+str(np.round(r_value**2,3)))
return slope, intercept, r_value,p_value,std_err
seamask=xr.open_dataset('processed/seamask.nc') #Because 2020 version doesn't have it.
seamask= seamask.assign_coords(lon=(seamask.lon % 360)).roll(lon=(seamask.dims['lon']),roll_coords=False).sortby('lon')
landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'
landschutzer=xr.open_dataset(landsch_fp)
landschutzer= landschutzer.assign_coords(lon=(landschutzer.lon % 360)).roll(lon=(landschutzer.dims['lon']),roll_coords=False).sortby('lon') #EPIC 1 line fix for the dateline problem.
land_pac=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))
#land_pac.to_netcdf('processed/fluxmaps/landshutzer.nc')
land_pac=moles_to_carbon(land_pac.fgco2_smoothed)
#JMA=moles_to_carbon(xr.open_mfdataset('datasets/co2/JMA_co2/jma_flux.nc').flux.sel(lon=slice(120,290),lat=slice(-20,20)))
#yasanaka=moles_to_carbon(xr.open_mfdataset('datasets/co2/yasanaka_co2/Yasunaka_pCO2_flux.nc').flux_masked).sel(lon=slice(120,290),lat=slice(-20,20))
f_ratios=xr.open_mfdataset('processed/flux/fratios.nc')
ratio=f_ratios.laws2000#laws2000,laws2011a,laws2011b,henson2011
npp=(xr.open_dataset('processed/flux/avg_npp_rg_cbpm.nc').avg_npp/1000*365)
npp=(xr.open_dataset('processed/flux/avg_npp_rg_cafe.nc').avg_npp/1000*365)
#grid=xr.open_dataarray('processed/tropics_size_m2.nc')
grid=xr.open_dataarray('processed/earth_m2.nc')
grid['lon']=grid.lon+180
grid=grid.where(seamask.seamask==1)
#fig=plt.figure(figsize=(8,10))
limits=[['West',165,180],
['Central',205,220],
['East',235,250],
#['Basin_check',180,280]]
['Basin',150,270]] #As used in the paper
#['Ishii',135,270]]
#['Borgne',135,270]] #Le Borgne 2002 Warm Pool
#['Wyrtiki',180,270]]
#Borgne is lims=1
#Wyrtiki is both lims=5 and 10
lims=15
mass_table=pd.DataFrame({})
check_lag_corr_x=[]
check_lag_corr_y=[]
#Calculating overall flux rates.
plt.figure(figsize=(13,10))
for i,ty in enumerate(limits):
if i <=2:
ax = plt.subplot(2,3,i+1)
else:
ax=plt.subplot(2,1,2)
startl= ty[1]#120#80#160#135#180#135#180 #not 150
endl=ty[2]#280 #80W #270 #90W
gs=grid.sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum()
print('\n'+ty[0])
print('gridsize= '+str(gs.values/1e13))
# if i==0:
# #ax.text()
# plt.text('2000-01-01',1.6e14,'10NS, 165E-180W, '+str(np.round(gs.values/1e13,3))+'x10$^{13}$ m$^2$')
# elif i==1:
# plt.text('2000-01-01',1.6e14,'10NS, 170W-155W, '+str(np.round(gs.values/1e13,3))+'x10$^{13}$ m$^2$')
# elif i==2:
# plt.text('2000-01-01',1.6e14,'10NS, 130W-115W, '+str(np.round(gs.values/1e13,3))+'x10$^{13}$ m$^2$')
# elif i==3:
#        plt.text('2007-01-01',trenNP[3]*0.5e14,'10NS, 150E-80W, '+str(np.round(gs.values/1e13,3))+'x10$^{13}$ m$^2$')
#Year average of CO2 flux
CO2=(land_pac*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])
#JMC=(JMA*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])
#YAS=(yasanaka*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])
#plt.show()
CO2['time']=CO2['time'].astype('datetime64[M]')
#Year average of CO2 flux
henson=(npp*f_ratios.henson2011*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])
laws2000=(npp*f_ratios.laws2000*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])
laws2011a=(npp*f_ratios.laws2011a*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])
laws2011b=(npp*f_ratios.laws2011b*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])
#dunne=(npp*f_ratios.dunne2005_tpca*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])#.plot(label='Dunne 2005')
dunne=(npp*f_ratios.dunne2005*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])#.plot(label='Dunne 2005')
trim=(npp*f_ratios.trim*grid).sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum(dim=['lat','lon'])#.plot(label='Dunne 2005')
#JMC.plot(label='Iida flux',ax=ax)
#YAS.plot(label='Yasanaka flux',ax=ax)
#ax.plot(CO2.time,CO2,c='k')
CO=CO2.sel(time=slice('2000-01-01','2019-12-01'))
trenCO=trends(ax,CO.time.values,CO.values,c='k')
annual_rate_of_changeCO=trenCO[0]*365
#henson.plot(label='Henson',ax=ax)
#laws2011b.plot(label='Laws2011b',ax=ax,c='pink')
mod=laws2011a.sel(time=slice('2000-01-01','2019-12-01'))
trenNP=trends(ax,mod.time.values,mod.values)
annual_rate_of_changeNP=trenNP[0]*365
#(CO2+laws2011a).plot(label='combined',ax=ax,c='m')
if i==3:
trim.plot(label='DeVries and Webber 2017',ax=ax,c='darkorange',linewidth=2)#,linestyle='--')
#laws2011b.plot(label='Laws2011b',ax=ax,c='slategray',linewidth=2)
dunne.sel(time=slice('1997-01-01','2019-07-01')).plot(label='Dunne 2005',ax=ax,c='darkblue')
laws2000.plot(label='Laws 2000',ax=ax,c='green',linestyle='--')
laws2011a.plot(label='Laws 2011a',ax=ax,c='r',linewidth=2.5)
CO2.plot(label='CO$_{2}$ outgassing',ax=ax,c='k',linewidth=2.5)
#Calc trends for the other models #Not actually used just a test.
trim1=trim.sel(time=slice('2000-01-01','2019-12-01'))
dunne1=dunne.sel(time=slice('2000-01-01','2019-12-01'))
laws20001=laws2000.sel(time=slice('2000-01-01','2019-12-01'))
laws2011a1=laws2011a.sel(time=slice('2000-01-01','2019-12-01'))
trenNP1_trim=justtrends(trim1.time.values,trim1.values)[0]*365/1e15
trenNP1_dunne=justtrends(dunne1.time.values,dunne1.values)[0]*365/1e15
trenNP1_laws2000=justtrends(laws20001.time.values,laws20001.values)[0]*365/1e15
trenNP1_laws2011=justtrends(laws2011a1.time.values,laws2011a1.values)[0]*365/1e15
else:
laws2011a.plot(label='Laws 2011a',ax=ax,c='r')
CO2.plot(label='CO$_{2}$ outgassing',ax=ax,c='k')
#henson.plot(label='Henson',ax=ax,c='deeppink')
ax.set_xlim([np.datetime64('1997-06-01'),np.datetime64('2020-01-01')])
#ax.xaxis.grid(True, which='both')
ax.xaxis.set_minor_locator(AutoMinorLocator())
#plt.grid()
ax.set_xlabel('Year')
if i <=2:
        #ax.set_ylim([0,0.27*1e15])
ax.set_ylim([-0.015*1e15,0.25*1e15])
ax.set_title(chr(97+i)+') '+ty[0]+' Pacific',pad=16)
ax.set_ylabel('New production and CO$_{2}$ flux (PgC yr$^{-1}$)')
ax.yaxis.set_major_formatter(FixedOrderFormatter(15))
#y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
#ax.yaxis.set_major_formatter(y_formatter)
else:
ax.legend(loc='lower center',ncol=5)
ax.set_title(chr(97+i)+') Entire Basin')
ax.set_ylabel('New Production and CO$_{2}$ flux (PgC yr$^{-1}$)')
ax.set_ylim([0,1.6*1e15])
import matplotlib.patches as patches
dat=CO2.to_dataframe(name='CO2')
dat['henson']=henson.to_dataframe(name='henson').henson
dat['laws2011a']=laws2011a.to_dataframe(name='laws2011a').laws2011a
dat['laws2011b']=laws2011b.to_dataframe(name='laws2011b').laws2011b
dat['laws2000']=laws2000.to_dataframe(name='laws2000').laws2000
dat['dunne']=dunne.to_dataframe(name='dunne2005').dunne2005
dat['strim']=trim.to_dataframe(name='simpletrim').simpletrim
# dat['JMC']=JMC.to_dataframe(name='jmc').jmc
#gs=grid.sel(lat=slice(-lims,lims)).sel(lon=slice(startl,endl)).sum()
#print('gridsize= '+str(gs.values/1e14))
print('CO2: '+str(dat.CO2.mean()/1e15))
print('CO2 STD: '+str(dat.CO2.std()/1e15))
print('henson: '+str(dat.henson.mean()/1e15))
print('laws2011a: '+str(dat.laws2011a.mean()/1e15))
print('laws2011a STD: '+str(dat.laws2011a.std()/1e15))
print('laws2011b: '+str(dat.laws2011b.mean()/1e15))
print('laws2000: '+str(dat.laws2000.mean()/1e15))
print('Dunne: '+str(dat.dunne.mean()/1e15))
#print('JMC: '+str(dat.JMC.mean()/1e15))
print('NP Trend: '+ str(annual_rate_of_changeNP/1e15)+' ' +u"\u00B1 "+str((trenNP[4]*365)/1e15) +' PgC/yr/yr')
print('pval= '+str(trenNP[3]))
print('CO2 Trend: '+str(annual_rate_of_changeCO/1e15)+' ' +u"\u00B1 " +str((trenCO[4]*365)/1e15)+' PgC/yr/yr')
print('pval= '+str(trenCO[3]))
dat=dat[dat.index>np.datetime64('1997-08')]
lanina=pd.read_csv('processed/indexes/la_nina_events.csv')
cp_nino=pd.read_csv('processed/indexes/cp_events.csv')
#cpc.to_csv('processed/indexes/cold_cp_events.csv')
ep_nino=pd.read_csv('processed/indexes/ep_events.csv')
info=dat
ninaf=pd.DataFrame()
epf=pd.DataFrame()
cpf=pd.DataFrame()
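    #Composite the monthly series over each event window: La Nina, EP El Nino and CP El Nino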
for i in lanina.iterrows(): ninaf=ninaf.append(info[slice(i[1].start,i[1].end)])
for i in ep_nino.iterrows(): epf=epf.append(info[slice(i[1].start,i[1].end)])
for i in cp_nino.iterrows(): cpf=cpf.append(info[slice(i[1].start,i[1].end)])
nina_dates=ninaf.index
ep_dates=epf.index
cp_dates=cpf.index
ensofps=['processed/indexes/ep_events.csv','processed/indexes/la_nina_events.csv','processed/indexes/cp_events.csv']
for whichenso,fp in enumerate(ensofps):
events=
|
pd.read_csv(fp)
|
pandas.read_csv
|
import json
import argparse
import logging
import os
import csv
from multiprocessing import Pool
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import rankdata
from milieu.data.network import Network
from milieu.data.associations import load_diseases
from milieu.paper.figures.figure import Figure
from milieu.util.util import set_logger, parse_id_rank_pair, prepare_sns
class RecallCurve(Figure):
"""
    Figure that plots recall curves for disease protein prediction methods.
"""
def __init__(self, dir, params):
""" Initialize the
Args:
dir (string) The directory where the experiment should be run
params (dict)
"""
super().__init__(dir, params)
self._load_data()
prepare_sns(sns, self.params)
logging.info("Recall Curve")
logging.info("<NAME> -- SNAP Group")
logging.info("======================================")
def _run(self):
"""
"""
self._generate()
def _load_data(self):
"""
"""
logging.info("Loading Disease Associations...")
self.diseases_dict = load_diseases(self.params["associations_path"])
def _generate_recall_curve(self, ranks_path):
"""
"""
count = 0
recall_curve_sum = np.zeros(self.params["length"])
with open(ranks_path, 'r') as ranks_file:
rank_reader = csv.reader(ranks_file)
for i, row in enumerate(rank_reader):
if i == 0:
continue
if (("associations_threshold" in self.params) and self.params["associations_threshold"] > len(row) - 2):
continue
if (("splits" in self.params) and self.diseases_dict[row[0]].split not in self.params["splits"]):
continue
if self.diseases_dict[row[0]].split == "none":
continue
count += 1
ranks = [parse_id_rank_pair(rank_str)[1] for rank_str in row[2:]]
ranks = np.array(ranks).astype(int)
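                # recall at rank threshold k = (# of ranks <= k) / (total # of ranks)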
rank_bin_count = np.bincount(ranks)
recall_curve = 1.0 * np.cumsum(rank_bin_count) / len(ranks)
if len(recall_curve) < self.params["length"]:
recall_curve = np.pad(recall_curve,
(0, self.params["length"] - len(recall_curve)),
'edge')
recall_curve_sum += recall_curve[:self.params["length"]]
recall_curve = (recall_curve_sum / (count))
return recall_curve
def _generate(self):
"""
"""
count = 0
recall_curves = {}
for name, method_exp_dir in self.params["method_exp_dirs"].items():
logging.info(name)
if os.path.isdir(os.path.join(method_exp_dir, 'run_0')):
# if there are multiple runs of the experiment consider all
data = []
runs = []
for dir_name in os.listdir(method_exp_dir):
if dir_name[:3] != "run":
continue
path = os.path.join(method_exp_dir, dir_name, 'ranks.csv')
run = self._generate_recall_curve(path)
runs.append(run)
for threshold, recall in enumerate(run):
data.append((recall, threshold))
data =
|
pd.DataFrame(data=data, columns=["recall", "threshold"])
|
pandas.DataFrame
|
import pickle
import pandas as pd
from sklearn.externals import joblib
from sklearn import preprocessing
clf = joblib.load("train_model.m")
data_test = pd.read_csv("cleaning_test.csv")
df = pd.read_csv("data/test.csv")
f_names = ['OverallQual', 'GrLivArea', 'TotalBsmtSF', 'GarageArea', '1stFlrSF', 'FullBath', 'TotRmsAbvGrd',
'YearRemodAdd', 'YearBuilt', 'CentralAir', 'Neighborhood', 'RoofMatl', 'HouseStyle', 'KitchenQual',
'SaleCondition', 'SaleType']
for key in f_names:
data_test[key].fillna(data_test[key].mode()[0], inplace=True)
# Load the model parameters and re-encode the test data
x = data_test.values
y_te_pred = clf.predict(x)
y_scaler = joblib.load('scalarY')
prediction = pd.DataFrame(y_te_pred, columns=['SalePrice'])
p = y_scaler.inverse_transform(prediction)
result = pd.concat([df['Id'],
|
pd.DataFrame(p, columns=['SalePrice'])
|
pandas.DataFrame
|
import numpy as np
import random
import pandas as pd
param_grid = {
'patience': list(range(20, 21)),
'lr': list(np.logspace(np.log10(0.0005), np.log10(0.1), base=10, num=100)),
'lr_decay': list(np.linspace(0.6, 1, num=8)),
'weight_decay': [5e-6, 5e-5, 1e-5, 5e-4, 1e-4, 5e-3, 1e-3],
'drop_out': [0.5, 0.6, 0.7, 0.8, 0.9],
'batch_size': [64],
'hidden_dimension': [128]
}
params_20ng = {
'patience': [-1],
'lr': [0.0005, 0.0001],
'lr_decay': [1],
'weight_decay': [0],
'drop_out': [0.5],
'hidden_dimension': [256, 512],
'batch_size': [128, 256]
}
params_aclImdb = {
'patience': [-1],
'lr': [0.0001, 0.0005],
'lr_decay': [1],
'weight_decay': [0],
'drop_out': [0.5],
'hidden_dimension': [256, 512],
'batch_size': [128, 256]
}
params_ohsumed = {
'patience': [-1],
'lr': [0.001],
'lr_decay': [1],
'weight_decay': [0],
'drop_out': [0.5],
'hidden_dimension': [256, 512],
'batch_size': [128, 256]
}
params_R52 = {
'patience': [-1],
'lr': [0.001, 0.0005],
'lr_decay': [1],
'weight_decay': [0],
'drop_out': [0.5],
'hidden_dimension': [256, 512],
'batch_size': [128, 256]
}
params_R8 = {
'patience': [-1],
'lr': [0.001, 0.0005],
'lr_decay': [1],
'weight_decay': [0],
'drop_out': [0.5],
'hidden_dimension': [96, 128],
'batch_size': [64, 128]
}
params_mr = {
'patience': [-1],
'lr': [0.001, 0.0005],
'lr_decay': [1],
'weight_decay': [0],
'drop_out': [0.5],
'hidden_dimension': [96, 128],
'batch_size': [64, 128]
}
# def save_parameters():
# '''
# random search
# :return:
# '''
# MAX_EVALS = 10
# dfs = []
# for tune_id in range(MAX_EVALS):
# np.random.seed(tune_id)
# hps = {k: random.sample(v, 1) for k, v in param_grid_for_docs.items()}
# dfs.append(pd.DataFrame.from_dict(hps))
# dfs = pd.concat(dfs).reset_index(drop=True)
# dfs.to_csv('parameters_for_tuning_docs_new', sep='\t', index=False)
# print(dfs)
from sklearn.model_selection import ParameterGrid
def save_parameters():
'''
grid search
:return:
'''
dataset = 'ohsumed'
dfs = []
grids = list(ParameterGrid(params_ohsumed))
for grid in grids:
print(pd.DataFrame.from_dict(grid, orient='index').T)
dfs.append(pd.DataFrame.from_dict(grid, orient='index').T)
dfs =
|
pd.concat(dfs)
|
pandas.concat
|
import warnings
warnings.filterwarnings("ignore")
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
import json
import plotly
import pandas as pd
import numpy as np
import xgboost as xgb
from catboost import CatBoostRegressor
import lightgbm as lgb
from pandas_datareader import data
import datetime
app=Flask(__name__)
# load model
model = joblib.load("models/regressor.pkl")
def technical_indicators(df):
"""
    Technical indicator calculator function.
    Returns a pandas DataFrame of various technical indicators such as RSI, SMA, EVM, EWMA,
    Bollinger Bands (BB) and ROC, computed over different time intervals.
    Parameters:
    df (DataFrame) : Pandas DataFrame of stock prices
    Returns:
    new_df (DataFrame) : Pandas DataFrame of technical indicators
"""
new_df = pd.DataFrame()
dm = ((df['High'] + df['Low'])/2) - ((df['High'].shift(1) + df['Low'].shift(1))/2)
br = (df['Volume'] / 100000000) / ((df['High'] - df['Low']))
EVM = dm / br
new_df['EVM_15'] = EVM.rolling(15).mean()
sma_60 = pd.Series.rolling(df['Close'], window=60, center=False).mean()
new_df["SMA_60"] = sma_60
sma_200 = pd.Series.rolling(df['Close'], window=30, center=False).mean()
new_df["SMA_200"] = sma_200
ewma_50 = df['Close'].ewm(span = 50, min_periods = 50 - 1).mean()
new_df["EWMA_50"] = ewma_50
ewma_200 = df['Close'].ewm(span = 200, min_periods = 200 - 1).mean()
new_df["EWMA_200"] = ewma_200
sma_5 = pd.Series.rolling(df['Close'], window=5, center=False).mean()
std_5 = pd.Series.rolling(df['Close'], window=5, center=False).std()
bb_5_upper = sma_5 + (2 * std_5)
bb_5_lower = sma_5 - (2 * std_5)
new_df["BB_5_UPPER"] = bb_5_upper
new_df["BB_5_LOWER"] = bb_5_lower
new_df["SMA_5"] = sma_5
sma_10 = pd.Series.rolling(df['Close'], window=10, center=False).mean()
std_10 = pd.Series.rolling(df['Close'], window=10, center=False).std()
bb_10_upper = sma_10 + (2 * std_10)
bb_10_lower = sma_10 - (2 * std_10)
new_df["BB_10_UPPER"] = bb_10_upper
new_df["BB_10_LOWER"] = bb_10_lower
new_df["SMA_10"] = sma_10
sma_20 = pd.Series.rolling(df['Close'], window=20, center=False).mean()
std_20 = pd.Series.rolling(df['Close'], window=20, center=False).std()
bb_20_upper = sma_20 + (2 * std_20)
bb_20_lower = sma_20 - (2 * std_20)
new_df["BB_20_UPPER"] = bb_20_upper
new_df["BB_20_LOWER"] = bb_20_lower
new_df["SMA_20"] = sma_20
roc_5 = df['Close'][5:]/df['Close'][:-5].values - 1
new_df["ROC_5"] = roc_5
roc_10 = df['Close'][10:]/df['Close'][:-10].values - 1
new_df["ROC_10"] = roc_10
roc_20 = df['Close'][20:]/df['Close'][:-20].values - 1
new_df["ROC_20"] = roc_20
delta = df['Close'].diff()
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
up_5 =
|
pd.Series.rolling(up, window=5, center=False)
|
pandas.Series.rolling
|
import pytest
from datetime import datetime
import pandas as pd
from tadpole_algorithms.transformations import convert_to_year_month, \
convert_to_year_month_day, map_string_diagnosis
def test_forecastDf_date_conversion():
forecastDf = pd.DataFrame([{'Forecast Date': '2019-07'}])
assert pd.api.types.is_string_dtype(forecastDf.dtypes)
# original conversion code
forecastDf['Forecast Date'] = [datetime.strptime(x, '%Y-%m') for x in forecastDf['Forecast Date']] # considers every month estimate to be the actual first day 2017-01
print(forecastDf.dtypes)
assert
|
pd.api.types.is_datetime64_ns_dtype(forecastDf['Forecast Date'])
|
pandas.api.types.is_datetime64_ns_dtype
|
from pathsetup import run_path_setup
run_path_setup()
import os
import gl
gl.isTrain = False
from model_config import model_argparse
config = model_argparse()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = config['device']
import tensorflow as tf
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config)
import numpy as np
import pandas as pd
import utils
from ved import VEDModel
from sklearn.model_selection import train_test_split
np.random.seed(1337)
if config['dataset'] == 'daily':
train_data = pd.read_csv(config['data_dir'] + 'DailyDial/de_duplicated/df_daily_train.csv')
val_data =
|
pd.read_csv(config['data_dir'] + 'DailyDial/de_duplicated/df_daily_valid_without_duplicates.csv')
|
pandas.read_csv
|
############### Results Tables ###############
import pandas as pd
import numpy as np
import os
# set path to media-bias-prediction repository
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(os.path.join(repo_path, 'deep_learning_models'))
file_names = ['full_without_wrongly_labeled', 'aggregators_removed', 'tabloids_removed',
'duplicates_removed',
'aggregators_tabloids_duplicates_removed']
### Applied datasets results
wanted_results = []
for name in file_names:
score_list = []
for i in range(3):
scores = pd.read_csv(os.path.join('scores', f'metric_scores_allsides_{name}_rerun_{i+1}.csv')).iloc[-1,:]
score_list.append(scores)
wanted_results += score_list
applied_datasets_each_run = pd.DataFrame(wanted_results).drop(columns=['epoch', 'time', 'train_precision', 'train_recall',
'val_precision', 'val_recall', 'test_precision',
'test_recall', 'train_loss', 'val_loss', 'test_loss',
'memory']).__round__(4)
standard_deviations = np.zeros((5,9))
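# one row per dataset (5), one column per kept metric (9); sample std (ddof=1) over each dataset's 3 reruns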
for i in range(0,13,3):
std_array = np.std(applied_datasets_each_run.iloc[i:i+3,:],axis=0, ddof=1)
standard_deviations[int(i/3),:] = std_array.round(4)
applied_datasets_std = pd.DataFrame(standard_deviations, columns=applied_datasets_each_run.columns)
# move latex function from back to beginning if needed
#latex_output_fct(applied_datasets_std)
average_results = []
for i in range(0,len(wanted_results),3):
average_results.append((wanted_results[i+0]+wanted_results[i+1]+wanted_results[i+2])/3)
average_results_df = pd.DataFrame(average_results, index=file_names).__round__(4)
final_results = average_results_df.drop(columns=['epoch', 'time', 'train_precision', 'train_recall',
'val_precision', 'val_recall', 'test_precision',
'test_recall', 'train_loss', 'val_loss', 'test_loss',
'memory'])
### SHA-BiLSTM benchmark scores
dl_score_list = []
for i in range(3):
scores = pd.read_csv(os.path.join('dl_benchmark_scores', f'metric_scores_dl_benchmark_allsides_all_removed_rerun_{i+1}.csv')).iloc[-1,:]
dl_score_list.append(scores)
dl_score_df = pd.DataFrame(dl_score_list).drop(columns=['epoch', 'time', 'train_precision', 'train_recall',
'val_precision', 'val_recall', 'test_precision',
'test_recall', 'train_loss', 'val_loss', 'test_loss',
'memory'])
dl_standard_deviations = np.std(dl_score_df,axis=0).round(4)
dl_average_results = np.mean(dl_score_df, axis=0).__round__(4)
#latex_output_fct(dl_standard_deviations)
### Benchmark time and memory results
# Bert
bert_time_memory_df = pd.DataFrame(columns=['time','memory'])
for i in range(3):
temp_df = pd.read_csv(os.path.join('scores', f'metric_scores_allsides_aggregators_tabloids_duplicates_removed_rerun_{i+1}.csv'))[['time','memory']]
bert_time_memory_df = pd.concat([bert_time_memory_df,temp_df],)
bert_avg_time = round(np.sum(bert_time_memory_df['time'])/3,2)
bert_max_memory = np.max(bert_time_memory_df['memory'])
# SHA-BiLSTM
# batch=64
bilstm_time_memory_df = pd.DataFrame(columns=['time','memory'])
for i in range(3):
temp_df = pd.read_csv(os.path.join('dl_benchmark_scores', f'metric_scores_dl_benchmark_allsides_all_removed_rerun_{i+1}.csv'))[['time','memory']]
bilstm_time_memory_df = pd.concat([bilstm_time_memory_df,temp_df],)
bilstm_avg_time = round(np.sum(bilstm_time_memory_df['time'])/3,2)
bilstm_max_memory = np.max(bilstm_time_memory_df['memory'])
# batch=16
bilstm16_df = pd.read_csv(os.path.join('dl_benchmark_scores', f'metric_scores_dl_benchmark_allsides_batch_16_all_removed_rerun_1.csv'))[['time','memory']]
bilstm16_avg_time = round(np.sum(bilstm16_df['time']),2) # /3
bilstm16_max_memory = np.max(bilstm16_df['memory'])
### Cost sensitive results
cost_sensitive_score_list = []
for i in range(3):
scores = pd.read_csv(os.path.join('scores', f'metric_scores_allsides_cost_sensitive_all_removed_rerun_{i+1}.csv')).iloc[-1,:]
cost_sensitive_score_list.append(scores)
cost_sensitive_score_df = pd.DataFrame(cost_sensitive_score_list).drop(columns=['epoch', 'time', 'train_precision', 'train_recall',
'val_precision', 'val_recall', 'test_precision',
'test_recall', 'train_loss', 'val_loss', 'test_loss',
'memory'])
cost_sensitive_standard_deviations = np.std(cost_sensitive_score_df, axis=0).round(4)
cost_sensitive_average_results = np.mean(cost_sensitive_score_df,axis=0).round(4)
#latex_output_fct(cost_sensitive_standard_deviations)
### Excluded sources results
excluded_sources_results = []
excluded_sources_std = []
for group,sources_in_training in zip(['small', 'small', 'large', 'large'],
['with_sources', 'without_sources','with_sources', 'without_sources']):
results_per_category_list = []
for run in range(1,4):
single_run_df = pd.read_csv(os.path.join('scores', f'accuracy_scores_{group}_{sources_in_training}_run_{run}.csv')).iloc[0,:]
results_per_category_list.append(single_run_df)
results_per_category_df = pd.DataFrame(results_per_category_list)
average_per_category = np.mean(results_per_category_df, axis=0)
std_per_category = np.std(results_per_category_df, axis=0)
excluded_sources_results.append(average_per_category)
excluded_sources_std.append(std_per_category)
excluded_sources_small = pd.DataFrame(excluded_sources_results[:2]).__round__(4)
excluded_sources_small
excluded_sources_large = pd.DataFrame(excluded_sources_results[2:]).__round__(4)
excluded_sources_large
excluded_sources_small_std = pd.DataFrame(excluded_sources_std[:2]).__round__(4)
excluded_sources_large_std =
|
pd.DataFrame(excluded_sources_std[2:])
|
pandas.DataFrame
|
from functools import partial
import json
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from solarforecastarbiter.io import utils
# data for test Dataframe
TEST_DICT = {'value': [2.0, 43.9, 338.0, -199.7, 0.32],
'quality_flag': [1, 1, 9, 5, 2]}
DF_INDEX = pd.date_range(start=pd.Timestamp('2019-01-24T00:00'),
freq='1min',
periods=5,
tz='UTC', name='timestamp')
DF_INDEX.freq = None
TEST_DATA = pd.DataFrame(TEST_DICT, index=DF_INDEX)
EMPTY_SERIES = pd.Series(dtype=float)
EMPTY_TIMESERIES = pd.Series([], name='value', index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
EMPTY_DATAFRAME = pd.DataFrame(dtype=float)
EMPTY_TIME_DATAFRAME = pd.DataFrame([], index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
TEST_DATAFRAME = pd.DataFrame({
'25.0': [0.0, 1, 2, 3, 4, 5],
'50.0': [1.0, 2, 3, 4, 5, 6],
'75.0': [2.0, 3, 4, 5, 6, 7]},
index=pd.date_range(start='20190101T0600',
end='20190101T1100',
freq='1h',
tz='America/Denver',
name='timestamp')).tz_convert('UTC')
@pytest.mark.parametrize('dump_quality,default_flag,flag_value', [
(False, None, 1),
(True, 2, 2)
])
def test_obs_df_to_json(dump_quality, default_flag, flag_value):
td = TEST_DATA.copy()
if dump_quality:
del td['quality_flag']
converted = utils.observation_df_to_json_payload(td, default_flag)
converted_dict = json.loads(converted)
assert 'values' in converted_dict
values = converted_dict['values']
assert len(values) == 5
assert values[0]['timestamp'] == '2019-01-24T00:00:00Z'
assert values[0]['quality_flag'] == flag_value
assert isinstance(values[0]['value'], float)
def test_obs_df_to_json_no_quality():
td = TEST_DATA.copy()
del td['quality_flag']
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_obs_df_to_json_no_values():
td = TEST_DATA.copy().rename(columns={'value': 'val1'})
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_forecast_series_to_json():
series = pd.Series([0, 1, 2, 3, 4], index=pd.date_range(
start='2019-01-01T12:00Z', freq='5min', periods=5))
expected = [{'value': 0.0, 'timestamp': '2019-01-01T12:00:00Z'},
{'value': 1.0, 'timestamp': '2019-01-01T12:05:00Z'},
{'value': 2.0, 'timestamp': '2019-01-01T12:10:00Z'},
{'value': 3.0, 'timestamp': '2019-01-01T12:15:00Z'},
{'value': 4.0, 'timestamp': '2019-01-01T12:20:00Z'}]
json_out = utils.forecast_object_to_json(series)
assert json.loads(json_out)['values'] == expected
def test_json_payload_to_observation_df(observation_values,
observation_values_text):
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_json_payload_to_forecast_series(forecast_values,
forecast_values_text):
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
def test_empty_payload_to_obsevation_df():
out = utils.json_payload_to_observation_df({'values': []})
assert set(out.columns) == {'value', 'quality_flag'}
assert isinstance(out.index, pd.DatetimeIndex)
def test_empty_payload_to_forecast_series():
out = utils.json_payload_to_forecast_series({'values': []})
assert isinstance(out.index, pd.DatetimeIndex)
def test_null_json_payload_to_observation_df():
observation_values_text = b"""
{
"_links": {
"metadata": ""
},
"observation_id": "OBSID",
"values": [
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
observation_values = pd.DataFrame({
'value': pd.Series([None, None], index=ind, dtype=float),
'quality_flag': pd.Series([1, 1], index=ind)
})
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_null_json_payload_to_forecast_series():
forecast_values_text = b"""
{
"_links": {
"metadata": ""
},
"forecast_id": "OBSID",
"values": [
{
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
forecast_values = pd.Series([None, None], index=ind, dtype=float,
name='value')
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
@pytest.mark.parametrize('label,exp,start,end', [
('instant', TEST_DATA, None, None),
(None, TEST_DATA, None, None),
('ending', TEST_DATA.iloc[1:], None, None),
('beginning', TEST_DATA.iloc[:-1], None, None),
pytest.param('er', TEST_DATA, None, None,
marks=pytest.mark.xfail(raises=ValueError)),
# start/end outside data
('ending', TEST_DATA, pd.Timestamp('20190123T2300Z'), None),
('beginning', TEST_DATA, None, pd.Timestamp('20190124T0100Z')),
# more limited
('ending', TEST_DATA.iloc[2:],
|
pd.Timestamp('20190124T0001Z')
|
pandas.Timestamp
|
import json
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
import pandas as pd
import pandas
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import os
import glob
mydir="/home/cloudera/streamData/output*"
#Running the program continuously
while(True):
# mydir="file:/home/cloudera/streamData/output*"
output_files = [file for file in glob.glob(os.path.join(mydir, 'part-*'))]
output_files.sort(key=os.path.getmtime,reverse=True)
#Reading the most rececntly modified file while plotting
myrdd1 = sc.wholeTextFiles('file:'+output_files[0])
print('file:'+output_files[0])
#Converting to spark DataFrame
dataDF=myrdd1.toDF()
dataDF=dataDF.toPandas()
dataDF.columns=["filename","cities"]
#Filtering the empty cities
dataDF=dataDF[dataDF['cities']!=""]
dataDF=dataDF[dataDF['filename']!=""]
if not dataDF.empty:
newDF=dataDF
if newDF.shape[0]!=0:
location=pd.DataFrame(newDF['cities'][0].split('\n'))
location.columns=['cities']
location["cities"]=location["cities"].astype(str)
splitDF=location['cities'].apply(lambda x: pd.Series(x.split(',')))
splitDF.columns=['cities','counts']
splitDF['counts']=splitDF['counts'].map(lambda x: str(x).replace(')',''))
splitDF=splitDF[splitDF["cities"]!='nan']
splitDF=splitDF[splitDF["counts"]!='nan']
splitDF['counts']=
|
pd.to_numeric(splitDF['counts'])
|
pandas.to_numeric
|
# ClinVarome annotation functions
# Gather all genes annotations : gene, gene_id,
# (AF, FAF,) diseases, clinical features, mecanismes counts, nhomalt.
# Give score for genes according their confidence criteria
# Commented code is the lines needed to make the AgglomerativeClustering
import pandas as pd
import numpy as np
import pysam
from scipy.stats import poisson
# from sklearn.preprocessing import QuantileTransformer
# from sklearn.cluster import AgglomerativeClustering
from clinvarome.utils.dictionary import (
EFFECTS,
MC_CATEGORIES,
MC_SHORT,
# ARRAY_TRANSFORM,
# CLUSTER_NAMES,
)
import logging
# For logs
def get_logger(scope: str, level=logging.DEBUG):
"""
get_logger
"""
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=level
)
return logging.getLogger(scope)
logger = get_logger(__name__)
# Clinical features
def gather_clinical_features(record, gene_finding, gene_disease):
"""
update gene_finding and gene_disease dictionary using information from a VCF record
"""
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
if "CLNDISEASE" in record.info:
clndisease = record.info["CLNDISEASE"][0].split("|")
gene_disease.setdefault(geneinfo, [])
gene_disease[geneinfo].append(clndisease)
if "CLNFINDING" in record.info:
clnfinding = record.info["CLNFINDING"][0].split("|")
gene_finding.setdefault(geneinfo, [])
gene_finding[geneinfo].append(clnfinding)
def get_clinical_dataframe(gene_disease, gene_finding):
"""
Process dictionary output from gather_clinical_features function
into a dataframe
"""
for key, value in gene_disease.items():
flat_list = [j for i in value for j in i]
gene_disease[key] = ";".join(sorted(list(set(flat_list))))
gene_disease_df = pd.DataFrame(
gene_disease.items(), columns=["gene_info", "clinical_disease"]
)
for key, value in gene_finding.items():
flat_list = [j for i in value for j in i]
gene_finding[key] = ";".join(sorted(list(set(flat_list))))
gene_finding_df = pd.DataFrame(
gene_finding.items(), columns=["gene_info", "clinical_finding"]
)
gene_features = gene_disease_df.merge(gene_finding_df, how="outer")
return gene_features
# FAF
def calcul_max_AF(AC, AN):
"""
    For a given AC and AN, calculate the maximum AF: the
    upper bound of the Poisson 95% CI.
"""
if (AC == 0) and (AN != 0):
max_AF_pois = 1 / AN
elif (AC != 0) and (AN != 0):
max_AC_pois = poisson.ppf(0.95, AC)
max_AF_pois = float(max_AC_pois / AN)
else:
max_AF_pois = 0
return max_AF_pois
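# For example, calcul_max_AF(3, 10000) returns poisson.ppf(0.95, 3) / 10000 (roughly 6e-4),
# while calcul_max_AF(0, 10000) falls back to 1 / 10000.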
def gather_dict_gene_max_AF(record, gene_AF_pois_dict):
"""
Update the maximum FAF of a gene using information in a VCF record
"""
ls_AC = []
ls_AN = []
ls_AF_pois = []
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_AF_pois_dict.setdefault(geneinfo, [])
if "AC_afr" in record.info:
AC_afr = record.info["AC_afr"]
AC_amr = record.info["AC_amr"]
AC_nfe = record.info["AC_nfe"]
AC_eas = record.info["AC_eas"]
AN_afr = record.info["AN_afr"]
AN_amr = record.info["AN_amr"]
AN_nfe = record.info["AN_nfe"]
AN_eas = record.info["AN_eas"]
ls_AC = [AC_afr, AC_amr, AC_nfe, AC_eas]
ls_AN = [AN_afr, AN_amr, AN_nfe, AN_eas]
for k in range(0, len(ls_AC)):
ls_AF_pois.append(calcul_max_AF(ls_AC[k], ls_AN[k]))
max_af_pois = max(ls_AF_pois)
gene_AF_pois_dict[geneinfo].append(max_af_pois)
else:
gene_AF_pois_dict[geneinfo].append(0)
def get_AF_max_by_gene(gene_AF_dict):
"""For a given gene, return the maximum FAF (among its variants)
and get a dataframe."""
gene_AF_max = {}
for key, values in gene_AF_dict.items():
gene_max_AF = max(values)
gene_AF_max.setdefault(key, [])
gene_AF_max[key].append(gene_max_AF)
print(gene_AF_max)
gene_anno_pois = pd.DataFrame.from_dict(
gene_AF_max, orient="index", columns=["FAF"]
)
gene_anno_pois = gene_anno_pois.reset_index()
gene_anno_pois = gene_anno_pois.rename(columns={"index": "gene_info"})
print(gene_anno_pois)
return gene_anno_pois
# Molecular consequence counts
def mol_consequences_by_variant(record, gene_var_dict):
"""
Parse molecular consequences (mc) available for a variant and
return the highest predicted effect
"""
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_var_dict.setdefault(geneinfo, [])
if "MC" in record.info:
mc = record.info["MC"]
mc_only = [i.split("|")[1] for i in mc]
min_value = min([v for k, v in EFFECTS.items() if k in mc_only])
for key, value in EFFECTS.items():
if min_value == value:
gene_var_dict[geneinfo].append(MC_CATEGORIES[key])
break
else:
gene_var_dict[geneinfo].append("Not_provided")
def count_type_mol_consequences(gene_var_dict):
"""
    Count occurrences of molecular consequences (mc) from pathogenic
    variants for each gene
"""
gene_mc_count = {}
for key, values in gene_var_dict.items():
list_mc = []
for k in MC_SHORT.keys():
if k in values:
count = values.count(k)
list_mc.append([count, k])
gene_mc_count.setdefault(key, [])
gene_mc_count[key].append(list_mc)
return gene_mc_count
def get_mol_consequences_dataframe(gene_var_dict):
"""
    Format the molecular consequence (mc) occurrences-by-gene dictionary into a dataframe.
"""
gene_mc_count = count_type_mol_consequences(gene_var_dict)
df_tot =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import pytest
import unittest
import datetime
import sys
import context
from fastbt.utils import *
def equation(a,b,c,x,y):
return a*x**2 + b*y + c
def test_multiargs_simple():
seq = pd.Series([equation(1,2,3,4,y) for y in range(20, 30)]).sort_index()
seq.index = range(20,30)
constants = {'a':1, 'b':2, 'c':3, 'x':4}
variables = {'y': range(20, 30)}
par = multi_args(equation, constants=constants, variables=variables).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert (x,) == y
def test_multiargs_product():
seq = []
for x in range(0,10):
for y in range(10,15):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 10), range(10, 15)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 10), 'y': range(10,15)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_multiargs_max_limit():
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
assert len(par) == 1000
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
@pytest.mark.parametrize("maxLimit", [2000, 3000, 5000, 10000])
def test_multiargs_max_limit_adjust(maxLimit):
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True, maxLimit=maxLimit).sort_index()
assert len(par) == min(maxLimit, 5000)
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_tick():
assert tick(112.71) == 112.7
assert tick(112.73) == 112.75
assert tick(1054.85, tick_size=0.1) == 1054.8
assert tick(1054.851, tick_size=0.1) == 1054.9
assert tick(104.73, 1) == 105
assert tick(103.2856, 0.01) == 103.29
assert tick(0.007814, 0.001) == 0.008
assert tick(0.00003562, 0.000001) == 0.000036
assert tick(0.000035617, 0.00000002) == 0.00003562
def test_tick_series():
s = pd.Series([100.43, 200.32, 300.32])
result = [100.45, 200.3, 300.3]
for x,y in zip(tick(s), result):
assert x==y
def test_stop_loss():
assert stop_loss(100, 3) == 97
assert stop_loss(100, 3, order='S') == 103
assert stop_loss(1013, 2.5, order='B', tick_size=0.1) == 987.7
    assert stop_loss(100, -3) == 103 # This should be deprecated
assert stop_loss(100, -3, order='S') == 97
def test_stop_loss_error():
with pytest.raises(ValueError):
assert stop_loss(100, 3, 'BS')
def test_stop_loss_series():
p =
|
pd.Series([100.75, 150.63, 180.32])
|
pandas.Series
|
# We could use this file to store helpers that interface with Excel, for example:
# - read the Excel file and output a df
# - list all the tickers we currently have shares of
# - etc.
# That way, if we want to access the same data across different files and functions, we can all
# call it from here (easier to change code, and less work for us).
# We can also use this module to hold, for example, a dictionary or set that stores all the tickers
# we own, plus the latest trades for each ticker, so we don't need to run through the whole list of
# trades every time we want to find the latest trade for a given ticker.
# Let me know what you think of this, Chai!
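# A rough sketch of that cache idea (hypothetical names; nothing below uses them yet):
# owned_tickers = set()   # tickers we currently hold shares of
# latest_trade = {}       # ticker -> most recent trade row, built once from the sheet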
import pandas as pd
import numpy as np
import pickle
import os
excel_file_name = './trading_data.xlsx'
excel_helper_file_name = './data_helper.xlsx'
def get_df():
df =
|
pd.read_excel(excel_file_name)
|
pandas.read_excel
|
'''
Group enabled ANPNetwork class and supporting classes.
'''
from pyanp.pairwise import Pairwise
from pyanp.prioritizer import Prioritizer, PriorityType
from pyanp.general import islist, unwrap_list, get_matrix, matrix_as_df
from typing import Union
import pandas as pd
from copy import deepcopy
from pyanp.limitmatrix import normalize, calculus, priority_from_limit
import numpy as np
import re
from pyanp.rating import Rating
class ANPNode:
'''
    A node inside a cluster, inside a network. The basic building block of
    an ANP network.
:param network: An ANPNetwork object that this node lives inside.
:param cluster: An ANPCluster object that this node lives inside.
:param name: The name of this node.
'''
def __init__(self, network, cluster, name:str):
self.name = name
self.cluster = cluster
self.network = network
self.node_prioritizers = {}
self.subnetwork = None
self.invert = False
def is_node_cluster_connection(self, dest_cluster:str)->bool:
'''
Is this node connected to a cluster.
:param dest_cluster: The name of the cluster
:return: True/False
'''
if dest_cluster in self.node_prioritizers:
return True
else:
return False
def node_connect(self, dest_node)->None:
        '''
        Make a node connection from this node to dest_node
        :param dest_node: The destination node as a str, int, or ANPNode. It
            can be a list of nodes, and then we will connect each node from
this node. The dest_node should be in any format accepted by
ANPNetwork._get_node()
'''
if islist(dest_node):
for dn in dest_node:
self.node_connect(dn)
else:
prioritizer = self.get_node_prioritizer(dest_node, create=True)
prioritizer.add_alt(dest_node, ignore_existing=True)
#Make sure parent clusters are connected
src_cluster = self.cluster
dest_cluster = self.network._get_node_cluster(dest_node)
src_cluster.cluster_connect(dest_cluster)
def get_node_prioritizer(self, dest_node, create=False,
create_class=Pairwise, dest_is_cluster=False)->Prioritizer:
'''
Gets the node prioritizer for the other_node
:param dest_node: The node as a int, str, or ANPNode object.
:return: The prioritizer if it exists, or None
'''
if dest_is_cluster:
dest_cluster = self.network.cluster_obj(dest_node)
dest_name = dest_cluster.name
else:
dest_cluster = self.network._get_node_cluster(dest_node)
dest_name = dest_cluster.name
if dest_name not in self.node_prioritizers:
if create:
prioritizer = create_class()
self.node_prioritizers[dest_name] = prioritizer
return prioritizer
else:
return None
else:
return self.node_prioritizers[dest_name]
def is_node_node_connection(self, dest_node)->bool:
'''
Checks if there is a node connection from this node to dest_node
:param dest_node: The node as a int, str, or ANPNode object.
:return:
'''
pri = self.get_node_prioritizer(dest_node)
if pri is None:
return False
elif not pri.is_alt(dest_node):
return False
else:
return True
def get_unscaled_column(self, username=None)->pd.Series:
'''
Returns the column in the unscaled supermatrix for this node.
:param username: The user/users to do this for. Typical Prioritizer
calculation usage, i.e. None means do for all group average.
:return: A pandas series indexed by the node names.
'''
nnodes = self.network.nnodes()
rval = pd.Series(data=[0.0]*nnodes, index=self.network.node_names())
prioritizer:Prioritizer
for prioritizer in self.node_prioritizers.values():
vals = prioritizer.priority(username, PriorityType.NORMALIZE)
for alt, val in vals.iteritems():
rval[alt] = val
return rval
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
pri:Prioritizer
for pri in self.node_prioritizers.values():
pri.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def set_node_prioritizer_type(self, destNode, prioritizer_class):
'''
Sets the node prioritizer type
:param destNode: An ANPNode object, string, or integer location
:param prioritizer_class: The new type
:return: None
'''
pri = self.get_node_prioritizer(destNode, create_class=prioritizer_class)
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
dest_cluster = self.network._get_node_cluster(destNode)
dest_name = dest_cluster.name
self.node_prioritizers[dest_name] = rval
else:
pass
class ANPCluster:
'''
A cluster in an ANP object
:param network: The ANPNetowrk object this cluster is in.
:param name: The name of the cluster to create.
'''
def __init__(self, network, name:str):
self.prioritizer = Pairwise()
self.name = name
self.network = network
# The list of ANP nodes in this cluster
self.nodes = {}
def add_node(self, *nodes)->None:
"""
Adds one or more nodes
:param nodes: A vararg list of node names to add to this cluster.
The names should all be strings.
        :return: Nothing
"""
nodes = unwrap_list(nodes)
if islist(nodes):
for node in nodes:
if isinstance(node, str):
self.add_node(node)
else:
self.nodes[nodes] = ANPNode(self.network, self, nodes)
def nnodes(self)->int:
"""
:return: The number of nodes in this cluster.
"""
return len(self.nodes)
def is_node(self, node_name:str)->bool:
'''
Does a node by that name exist in this cluster
:param node_name: The name of the node to look for
:return: True/False
'''
return node_name in self.nodes
def node_obj(self, node_name):
"""
Get a node in this cluster.
:param node_name: The node as either a string name, integer position, or
simply the ANPObject, in which case there is nothing to do except
return it.
:return: ANPNode object. If it wasn't found, None is returned.
"""
if isinstance(node_name, ANPNode):
return node_name
else:
return get_item(self.nodes, node_name)
def node_names(self)->list:
'''
:return: List of the string names of the nodes in this cluster
'''
return list(self.nodes.keys())
def node_objs(self)->list:
'''
:return: List of the ANPNode objects in this cluster.
'''
return self.nodes.values()
def cluster_connect(self, dest_cluster)->None:
"""
Make a cluster->cluster connection from this node to the destination.
:param dest_cluster: Either the ANPCluster object to connect to, or
the name of the destination cluster.
:return:
"""
if isinstance(dest_cluster, ANPCluster):
dest_cluster_name = dest_cluster.name
else:
dest_cluster_name = dest_cluster
self.prioritizer.add_alt(dest_cluster_name, ignore_existing=True)
def set_prioritizer_type(self, prioritizer_class)->None:
'''
Sets the cluster prioritizer type
:param prioritizer_class: The new type
:return: None
'''
pri = self.prioritizer
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
self.prioritizer = rval
else:
pass
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
if self.prioritizer is not None:
self.prioritizer.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def get_item(tbl:dict, key):
"""
Looks up an item in a dictionary by key first, assuming the key is in the
dictionary. Otherwise, it checks if the key is an integer, and returns
the item in that position.
:param tbl: The dictionary to look in
:param key: The key, or integer position to get the item of
:return: The item, or it not found, None
"""
if key in tbl:
return tbl[key]
elif not isinstance(key, int):
return None
# We have an integer key by this point
if key < 0:
return None
elif key >= len(tbl):
return None
else:
count = 0
for rval in tbl.values():
if count == key:
return rval
count+=1
#Should never make it here
raise ValueError("Shouldn't happen in anp.get_item")
__CLEAN_SPACES_RE = re.compile('\\s+')
def clean_name(name:str)->str:
"""
Cleans up a string for usage by:
    1. stripping off beginning and ending spaces
2. All spaces convert to one space
3. \t and \n are treated like a space
:param name: The string name to be cleaned
:return: The cleaned name.
"""
rval = name.strip()
return __CLEAN_SPACES_RE.sub(string=rval, repl=' ')
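# e.g. clean_name("  node\tA \n") returns "node A"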
def sum_subnetwork_formula(priorities:pd.Series, dict_of_series:dict):
"""
A function that takes the weighted sum of values. Used for synthesis.
:param priorities: Series whose index are the nodes with subnetworks and
values are their weights.
:param dict_of_series: A dictionary whose keys are the same as the keys of
priorities, i.e. the nodes with subnetworks. The values are Series
whose keys are alternative names and values are the synthesized
alternative scores under that subnetwork.
:return:
"""
subpriorities = priorities[dict_of_series.keys()]
if sum(subpriorities) != 0:
subpriorities /= sum(subpriorities)
rval = pd.Series()
counts = pd.Series(dtype=int)
for subnet_name, vals in dict_of_series.items():
priority = subpriorities[subnet_name]
for alt_name, val in vals.iteritems():
if alt_name in rval:
rval[alt_name] += val * priority
counts[alt_name] += priority
else:
rval[alt_name] = val
counts[alt_name] = priority
# Now let's calculate the averages
for alt_name, val in rval.iteritems():
if counts[alt_name] > 0:
rval[alt_name] /= counts[alt_name]
return rval
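# A minimal usage sketch (hypothetical node/alternative names):
#   pris = pd.Series({'crit A': 0.6, 'crit B': 0.4})
#   subs = {'crit A': pd.Series({'alt1': 0.7, 'alt2': 0.3}),
#           'crit B': pd.Series({'alt1': 0.2, 'alt2': 0.8})}
#   sum_subnetwork_formula(pris, subs)  # -> pd.Series indexed by 'alt1', 'alt2'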
class ANPNetwork(Prioritizer):
'''
Represents an ANP prioritizer. Has clusters/nodes, comparisons, etc.
:param create_alts_cluster: If True (which is the default) we start with a
cluster that is the alternatives cluster. Otherwise the model starts
empty.
'''
def __init__(self, create_alts_cluster=True):
self.clusters = {}
if create_alts_cluster:
cl = self.add_cluster("Alternatives")
self.alts_cluster = cl
self.users=[]
self.limitcalc = calculus
self.subnet_formula = sum_subnetwork_formula
self.default_priority_type = None
def add_cluster(self, *args)->ANPCluster:
'''
Adds one or more clusters to a network
:param args: Can be either a single string, or a list of strings
:return: ANPCluster object or list of ANPCluster objects
'''
clusters = unwrap_list(args)
if islist(clusters):
rval = []
for cl in clusters:
rval.append(self.add_cluster(cl))
return rval
else:
#Adding a single cluster
cl = ANPCluster(self, clusters)
self.clusters[clusters] = cl
return cl
def cluster_names(self)->list:
'''
:return: List of string names of the clusters
'''
return list(self.clusters.keys())
def nclusters(self)->int:
'''
:return: The number of clusters in the network.
'''
return len(self.clusters)
def cluster_obj(self, cluster_info:Union[ANPCluster, str])->ANPCluster:
'''
Returns the cluster with given information
:param cluster_info: Either the name of the cluster object to get
or the cluster object, or its int position
:return: The ANPCluster object
'''
if isinstance(cluster_info, ANPCluster):
return cluster_info
else:
return get_item(self.clusters, cluster_info)
def add_node(self, cl, *nodes):
'''
Adds nodes to a cluster
:param cl: The cluster name or object
:param nodes: The name or names of the nodes
:return: Nothing
'''
cluster = self.cluster_obj(cl)
cluster.add_node(nodes)
def nnodes(self, cluster=None)->int:
"""
Returns the number of nodes in the network, or a cluster.
:param cluster: If None, we return the number of nodes in the network.
Otherwise this is the integer position, string name, or ANPCluster
object of the cluster to get the node count within.
:return: The count.
"""
if cluster is None:
rval = pd.Series()
for cname, cluster in self.clusters.items():
rval[cname] = cluster.nnodes()
return sum(rval)
else:
clobj = self.cluster_obj(cluster)
return clobj.nnodes()
def add_alt(self, alt_name:str):
"""
Adds an alternative to the model:
        1. Adds the alternative to alts_cluster if not None
2. For each node with a subnetwork, we add the alternative to that subnetwork.
:param alt_name: The name of the alternative to add
:return: Nothing
"""
if self.alts_cluster is not None:
self.add_node(self.alts_cluster, alt_name)
# We should add this alternative to each subnetwork
for node in self.node_objs_with_subnet():
node.subnetwork.add_alt(alt_name)
def is_user(self, uname)->bool:
'''
Checks if a user exists
:param uname: The name of the user to check for
:return: bool
'''
return uname in self.users
def is_alt(self, altname)->bool:
'''
Checks if an alternative exists
        :param altname: The alternative name to look for
:return: bool
'''
return self.alts_cluster.is_node(altname)
def add_user(self, uname, ignore_dupe=False):
'''
Adds a user to the system
:param uname: The name of the new user
:return: Nothing
:raise ValueError If the user already existed
'''
if islist(uname):
for un in uname:
self.add_user(un, ignore_dupe=ignore_dupe)
return
if self.is_user(uname):
if not ignore_dupe:
raise ValueError("User by the name "+uname+" already existed")
else:
return
self.users.append(uname)
def nusers(self)->int:
'''
:return: The number of users
'''
return len(self.users)
def user_names(self)->list:
'''
:return: List of names of the users
'''
return deepcopy(self.users)
def node_obj(self, node_name)->ANPNode:
'''
Gets the ANPNode object of the node with the given name
:param node_name: The name of the node to get, or it's overall integer
position, or the ANPNode object itself
:return: The ANPNode if it exists, or None
'''
if isinstance(node_name, ANPNode):
return node_name
elif isinstance(node_name, int):
#Reference by integer
node_pos = node_name
node_count = 0
for cluster in self.clusters.values():
rel_pos = node_pos - node_count
if rel_pos < cluster.nnodes():
return cluster.node_obj(rel_pos)
#If we make it here, we were out of bounds
return None
#Okay handle string node name
cluster: ANPCluster
for cname, cluster in self.clusters.items():
rval = cluster.node_obj(node_name)
if rval is not None:
return rval
#Made it here, the node didn't exist
return None
def _get_node_cluster(self, node)->ANPCluster:
'''
Gets the ANPCluster object a node lives in
:param node: The name/integer positions, or ANPNode object itself. See
node_obj() method for more details.
:return: The ANPCluster object this node lives in, or None if it doesn't
exist.
'''
n = self.node_obj(node)
if n is None:
# Could not find the node
return None
return n.cluster
def node_connect(self, src_node, dest_node):
'''
connects 2 nodes
:param src_node: Source node as prescribed by node_object() function
:param dest_node: Destination node as prescribed by node_object() function
:return: Nothing
'''
src = self.node_obj(src_node)
src.node_connect(dest_node)
def node_names(self, cluster=None)->list:
'''
Returns a list of nodes in this network, organized by cluster
:param cluster: If None, we get all nodes in network, else we get nodes
in that cluster, otherwise format as specified by cluster_obj() function.
:return: List of strs of node names
'''
if cluster is not None:
cl = self.cluster_obj(cluster)
return cl.node_names()
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_names()
for name in cnodes:
rval.append(name)
return rval
def node_objs(self)->list:
'''
Returns a list of ANPNodes in this network, organized by cluster
:return: List of strs of node names
'''
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_objs()
for name in cnodes:
rval.append(name)
return rval
def cluster_objs(self)->list:
"""
:return: List of ANPCluster objects in the network
"""
return list(self.clusters.values())
def node_connections(self)->np.ndarray:
"""
        Returns the node connection matrix for this network.
:return: A numpy array of shape [nnode, nnodes] where item [row, col]
1 means there is a node connection from col -> row, and 0 means
no connection.
"""
nnodes = self.nnodes()
nnames = self.node_names()
rval = np.zeros([nnodes, nnodes])
src_node:ANPNode
for src in range(nnodes):
srcname = nnames[src]
src_node = self.node_obj(srcname)
for dest in range(nnodes):
dest_name = nnames[dest]
if src_node.is_node_node_connection(dest_name):
rval[dest,src]=1
return rval
def unscaled_supermatrix(self, username=None, as_df=False)->np.array:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The unscaled supermatrix as a numpy.array of shape [nnode, nnodes]
'''
nnodes = self.nnodes()
rval = np.zeros([nnodes, nnodes])
nodes = self.node_objs()
col = 0
node:ANPNode
for node in nodes:
rval[:,col] = node.get_unscaled_column(username)
col += 1
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def scaled_supermatrix(self, username=None, as_df=False)->np.ndarray:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The scaled supermatrix
'''
rval = self.unscaled_supermatrix(username)
        # Now I need to normalize by cluster weights
clusters = self.cluster_objs()
nclusters = len(clusters)
col = 0
for col_cp in range(nclusters):
col_cluster:ANPCluster = clusters[col_cp]
row_nnodes = col_cluster.nnodes()
cluster_pris = col_cluster.prioritizer.priority(username, PriorityType.NORMALIZE)
row_offset = 0
for col_node in col_cluster.node_objs():
row=0
for row_cp in range(nclusters):
row_cluster:ANPCluster = clusters[row_cp]
row_cluster_name = row_cluster.name
if row_cluster_name in cluster_pris:
priority = cluster_pris[row_cluster_name]
else:
priority = 0
for row_node in row_cluster.node_objs():
rval[row, col] *= priority
row += 1
col += 1
normalize(rval, inplace=True)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def global_priority(self, username=None)->pd.Series:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:return: The global priorities Series, index by node name
'''
lm = self.limit_matrix(username)
rval = priority_from_limit(lm)
node_names = self.node_names()
return pd.Series(data=rval, index=node_names)
def global_priority_df(self, user_infos=None)->pd.DataFrame:
'''
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: The global priorities dataframe. Rows are the nodes and
columns are the users. The first user/column is the Group Average
'''
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
for user in user_infos:
if user is None:
uname = "Group Average"
else:
uname = user
rval[uname] = self.global_priority(user)
return rval
def limit_matrix(self, username=None, as_df=False):
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The limit supermatrix
'''
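# self.limitcalc is the limit-matrix routine set elsewhere on this network
# (commonly a calculus-type limit); applying it to the scaled supermatrix
# yields the limit supermatrix.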
sm = self.scaled_supermatrix(username)
rval = self.limitcalc(sm)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def alt_names(self)->list:
'''
:return: List of alt names in this ANP model
'''
if self.has_subnet():
# We have some v1 subnetworks, we get alternative names by looking
# there.
rval = []
node: ANPNode
for node in self.node_objs_with_subnet():
alts = node.subnetwork.alt_names()
for alt in alts:
if alt not in rval:
rval.append(alt)
return rval
else:
return self.alts_cluster.node_names()
def priority(self, username=None, ptype:PriorityType=None)->pd.Series:
'''
Synthesize and return the alternative scores
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param ptype: The priority type to use
:return: A pandas.Series indexed on alt names, values are the score
'''
if ptype is None:
# Use the default priority type for this network
ptype = self.default_priority_type
if self.has_subnet():
# Need to synthesize using subnetworks
return self.subnet_synthesize(username=username, ptype=ptype)
else:
gp = self.global_priority(username)
alt_names = self.alt_names()
rval = gp[alt_names]
if sum(rval) != 0:
rval /= sum(rval)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def data_names(self):
'''
Returns the column headers needed to fill in the data for this model
:return: A list of strings that would be usable in excel for parsing
headers
'''
node:ANPNode
rval = []
cluster: ANPCluster
for cluster in self.cluster_objs():
cluster.data_names(rval)
for node in self.node_objs():
node.data_names(rval)
return rval
def node_connection_matrix(self, new_mat:np.ndarray=None):
'''
Returns the current node connection matrix if new_mat is None.
Otherwise, for each entry [row, col] in the matrix with a nonzero value
we connect from node[col] to node[row] (columns are sources, rows are
destinations).
:param new_mat: The new node connection matrix. If None, we return
the current one.
:return: Current connection matrix.
'''
src_node:ANPNode
nnodes = self.nnodes()
nodes = self.node_objs()
node_names = self.node_names()
if new_mat is not None:
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if new_mat[dest_node_pos, src_node_pos] != 0:
src_node.node_connect(node_names[dest_node_pos])
rval = np.zeros([nnodes, nnodes])
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if src_node.is_node_node_connection(node_names[dest_node_pos]):
rval[dest_node_pos, src_node_pos] = 1
return rval
def import_pw_series(self, series:pd.Series)->None:
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer (or cluster).
The name should be A vs B wrt C, where A, B, C are node or cluster names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
wrtNode:ANPNode
wrtNode = self.node_obj(wrt)
info = info[0].split( ' vs ')
if len(info) < 2:
raise ValueError(" vs was not present in "+name)
row, col = info
rowNode = self.node_obj(row)
colNode = self.node_obj(col)
npri: Pairwise
if (wrtNode is not None) and (rowNode is not None) and (colNode is not None):
# Node pairwise
npri = wrtNode.get_node_prioritizer(rowNode, create=True)
#print("Node comparison "+name)
if not isinstance(npri, Pairwise):
raise ValueError("Node prioritizer was not pairwise")
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
else:
# Try cluster pairwise
wrtcluster = self.cluster_obj(wrt)
rowcluster = self.cluster_obj(row)
colcluster = self.cluster_obj(col)
if wrtcluster is None:
raise ValueError("wrt="+wrt+" was not a cluster, and the group was not a node comparison")
if rowcluster is None:
raise ValueError("row="+row+" was not a cluster, and the group was not a node comparison")
if colcluster is None:
raise ValueError("col="+col+" was not a cluster, and the group was not a node comparison")
npri = self.cluster_prioritizer(wrtcluster)
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
#print("Cluster comparison "+name)
def set_alts_cluster(self, new_cluster):
'''
Sets the new alternatives cluster
:param new_cluster: Cluster specified as cluster_obj() expects.
:return: Nothing
'''
cl = self.cluster_obj(new_cluster)
self.alts_cluster = cl
def import_rating_series(self, series:pd.Series):
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer as ratings (or cluster).
Title should be A wrt B, where A and B are either both node names or
both column names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
dest = info[0].strip()
wrtNode:ANPNode
destNode:ANPNode
wrtNode = self.node_obj(wrt)
destNode = self.node_obj(dest)
npri:Rating
if (wrtNode is not None) and (destNode is not None):
# Node ratings
npri = wrtNode.get_node_prioritizer(destNode, create=True, create_class=Rating)
if not isinstance(npri, Rating):
wrtNode.set_node_prioritizer_type(destNode, Rating)
npri = wrtNode.get_node_prioritizer(destNode, create=True)
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
else:
# Trying cluster ratings
wrtcluster = self.cluster_obj(wrt)
destcluster = self.cluster_obj(dest)
if wrtcluster is None:
raise ValueError("Ratings: wrt is not a cluster wrt="+wrt+" and wasn't a node either")
if destcluster is None:
raise ValueError("Ratings: dest is not a cluster dest="+dest+" and wasn't a node either")
npri = wrtcluster.prioritizer
if not isinstance(npri, Rating):
wrtcluster.set_prioritizer_type(Rating)
npri = wrtcluster.prioritizer
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
def node_prioritizer(self, wrtnode=None, cluster=None):
'''
Gets the prioritizer for node->cluster connection
:param wrtnode: The node as understood by node_obj() function.
:param cluster: Cluster as understood by cluster_obj() function.
:return: If both wrtnode and cluster are specified, a single node prioritizer
is returned for that comparison (or None if there was nothing there).
Otherwise it returns a dictionary indexed by [wrtnode, cluster] and
whose values are the prioritizers for that (only the non-None ones).
'''
if wrtnode is not None and cluster is not None:
node = self.node_obj(wrtnode)
cl_obj = self.cluster_obj(cluster)
cluster_name = cl_obj.name
return node.get_node_prioritizer(dest_node=cluster_name, dest_is_cluster=True)
elif wrtnode is not None:
# Have wrtnode, do not have cluster
rval = {}
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
elif cluster is not None:
# Have cluster, but not wrtnode
rval = {}
for wrtnode in self.node_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
else:
# Both wrtnode and cluster are none, want all
rval = {}
for wrtnode in self.node_names():
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
def subnet(self, wrtnode):
'''
Gives wrtnode a subnetwork if it does not already have one.
:param wrtnode: The node to give a subnetwork to, or get the subnetwork
of. Node specified as node_obj() function expects.
:return: The ANPNetwork that is the subnet of this node
'''
node = self.node_obj(wrtnode)
if node.subnetwork is not None:
return node.subnetwork
else:
rval = ANPNetwork(create_alts_cluster=False)
node.subnetwork = rval
rval.default_priority_type = PriorityType.IDEALIZE
return rval
def node_invert(self, node, value=None):
'''
Either sets, or tells if a node is inverted
:param node: The node to do this on, as expected by node_obj() function
:param value: If None, we return the boolean about if this node is
inverted. Otherwise specifies the new value.
:return: T/F if value=None, telling if the node is inverted. Otherwise
returns nothing.
'''
node = self.node_obj(node)
if value is None:
return node.invert
else:
node.invert = value
def has_subnet(self)->bool:
'''
:return: True/False telling if some node has a subnetwork
'''
for node in self.node_objs():
if node.subnetwork is not None:
return True
return False
def subnet_synthesize(self, username=None, ptype:PriorityType=None):
'''
Does the standard V1 subnetwork synthesis.
:param username: The user/users to synthesize for. If None, we group
synthesize across all users. If a single user, we synthesize for that
user. If it is a list, we synthesize for the group consisting of those
users.
:param ptype: The priority type to apply to the synthesized scores.
:return: The synthesized alternative scores as a pandas.Series, indexed
by alternative name.
'''
# First we need our global priorities
pris = self.global_priority(username)
# Next we need the alternative priorities from each subnetwork
subnets = {}
node:ANPNode
for node in self.node_objs_with_subnet():
p = node.subnetwork.priority(username, ptype)
if node.invert:
p = self.invert_priority(p)
subnets[node.name]=p
rval = self.synthesize_combine(pris, subnets)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def node_objs_with_subnet(self):
"""
:return: List of ANPNode objects in this network that have v1 subnets
"""
return [node for node in self.node_objs() if node.subnetwork is not None]
def invert_priority(self, p):
"""
Makes a copy of the list like element p, and inverts. The current
standard inversion is 1-p. There could be others implemented later.
:param p: The list like to invert
:return: New list-like of same type as p, with inverted priorities
"""
rval = deepcopy(p)
for i in range(len(p)):
rval[i] = 1 - rval[i]
return rval
def synthesize_combine(self, priorities:pd.Series, alt_scores:dict):
"""
Performs the actual synthesis step from ANP v1 synthesis.
:param priorities: Priorities of the subnetworks
:param alt_scores: Alt scores as dictionary, keys are subnetwork names
values are Series whose keys are alt names.
:return: Series whose keys are alt names, and whose values are the
synthesized scores.
"""
return self.subnet_formula(priorities, alt_scores)
def cluster_prioritizer(self, wrtcluster=None):
"""
Gets the prioritizer for the clusters wrt a given cluster.
:param wrtcluster: WRT cluster identifier as expected by cluster_obj() function.
If None, then we return a dictionary indexed by cluster names and values
are the prioritizers
:return: The prioritizer for that cluster, or a dictionary of all cluster
prioritizers
"""
if wrtcluster is not None:
cluster = self.cluster_obj(wrtcluster)
return cluster.prioritizer
else:
rval = {}
for cluster in self.cluster_objs():
rval[cluster.name] = cluster.prioritizer
return rval
def to_excel(self, fname):
struct = pd.DataFrame()
cluster:ANPCluster
writer = pd.ExcelWriter(fname, engine='openpyxl')
for cluster in self.cluster_objs():
cluster_name = cluster.name
if cluster == self.alts_cluster:
cluster_name = "*"+str(cluster_name)
struct[cluster_name] = cluster.node_names()
struct.to_excel(writer, sheet_name="struct", index=False)
# Now the node connections
mat = self.node_connection_matrix()
pd.DataFrame(mat).to_excel(writer, sheet_name="connection", index=False, header=False)
# Lastly let's write just the comparison structure
cmp = self.data_names()
pd.DataFrame({"":cmp}).to_excel(writer, sheet_name="votes", index=False, header=True)
writer.save()
writer.close()
def cluster_incon_std_df(self, user_infos=None) -> pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are clusters, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
# We need the name for the group (i.e. None) to be something useful
for cluster, pw in self.cluster_prioritizer().items():
if isinstance(pw, Pairwise):
incon = [pw.incon_std(user) for user in user_infos]
rval[cluster] = pd.Series(incon, index=user_infos)
if None in rval.index:
rval = rval.rename(
lambda x: x if x is not None else "Group Average")
return rval
def node_incon_std_df(self, user_infos=None)->pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are (node,cluster) pairs, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
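# The methods above build a column-stochastic (scaled) supermatrix and take its
# limit to obtain global priorities. A minimal, self-contained numpy sketch of
# that idea (illustrative only; it is not the limitcalc routine this class
# actually uses, and the toy matrix below is made up):
import numpy as np

W = np.array([[0.0, 0.5, 0.4],
              [0.6, 0.0, 0.6],
              [0.4, 0.5, 0.0]])        # toy scaled supermatrix, columns sum to 1
L = np.linalg.matrix_power(W, 100)     # crude limit by repeated powering
global_pris = L[:, 0] / L[:, 0].sum()  # every column of the limit carries the priorities
print(global_pris)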
__author__ = 'jlu96'
import prep_jobs as pj
import sys
import os
import pickle
import csv
import pandas as pd
import math
import collections
import itertools
import geneTSmunging as gtm
def get_parser():
# Parse arguments
import argparse
description = 'Prepare cluster jobs by partitioning tests by rows and hyper-parameters.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-d', '--data_file', required=True)
parser.add_argument('-d2', '--rand_data_file', required=True, help="The effect genes")
parser.add_argument('-lr', '--load_reps', required=True, type=int)
parser.add_argument('-o', '--output_name', required=True)
parser.add_argument('-hlf', '--hyper_list_file', required=True)
parser.add_argument('-t', '--test', required=True)
parser.add_argument('-tn', '--test_name', required=True)
parser.add_argument('-sn', '--script_num', type=int, default=3)
parser.add_argument('-p', '--parallel_num', type=int, default=0)
parser.add_argument('-l', '--lag', type=int, required=True)
parser.add_argument('-n', '--null', type=str, required=True)
parser.add_argument('-cv', '--cv', type=int, default=1, help="Do prep with CV or not. If 0, then skip the CV-making step.")
parser.add_argument('-oa', '--only_array', type=int, default=0, help="Whether to only save output coefs as arrays (1) or as whole matrices that are integrated by adding (0)")
return parser
def run(args):
if args.test not in {"r", "l", "e"}:
raise ValueError("args.test must be r (ridge), l (lasso) or e (elastic net)")
if args.null not in {"l", "g"}:
raise ValueError("args.null must be l (local) or g (global)")
# Load files
data_file = args.data_file
rand_data_file = args.rand_data_file
if args.load_reps:
genes, geneTS = gtm.load_basic_rep_file_list(data_file)
#dfs, genes, geneTS, df, __, __ = gtm.load_rep_file_list(data_file)
else:
df = pd.read_csv(data_file, sep="\t")
genes, geneTS = gtm.get_gene_TS(df)
n = len(genes)
hyperlist = pickle.load(open(args.hyper_list_file, 'rb'))
# hyper_names = cp.hyperlist_to_namelist(hyperlist)
# Make hyper files for cross_validate loading.
hyper_filenames = []
print("*************")
print("HYPERS")
print("*************")
if not os.path.exists("hyper"):
os.makedirs("hyper")
# for hyper, hyper_name in zip(hyperlist, hyper_names):
for hyper, h in zip(hyperlist, list(range(len(hyperlist)))):
hyper_filename = "hyper" + os.sep + args.output_name + "-hyper-" + str(h) + ".p"
hyper_filenames.append(hyper_filename)
pickle.dump([hyper], open(hyper_filename, 'wb'))
print("Hypers written in format: ", hyper_filename)
# Make row files
# Split up the rows according to number of input scripts
partition_rows = pj.partition_inputs(list(range(n)), args.script_num)
row_filenames = []
print("*************")
print("ROWS")
print("*************")
if not os.path.exists("rows"):
os.makedirs("rows")
for partition_row, i in zip(partition_rows, list(range(len(partition_rows)))):
row_filename = os.path.join("rows", args.output_name + "-row-" + str(i) + ".p")
row_filenames.append(row_filename)
pickle.dump(partition_row, open(row_filename, 'wb'))
print("Row written in format: ", row_filename)
if not os.path.exists("timing"):
os.makedirs("timing")
print("Folder timing created")
resulttimefile = os.path.join("timing", "result_time.csv")
if not os.path.exists(resulttimefile):
with open(resulttimefile, 'w') as csvfile:
f = csv.writer(csvfile)
f.writerow(["Name", "Start", "End", "Elapsed"])
if args.cv != 0:
print("*************")
print("CV")
print("*************")
# Make CV scripts
cv_scripts = []
hyper_output_dict = collections.OrderedDict()
hyper_int_dict = collections.OrderedDict()
if not os.path.exists("cv-scripts"):
os.makedirs("cv-scripts")
cvtimefile = os.path.join("timing", "hyper_time.csv")
if not os.path.exists(cvtimefile):
with open(cvtimefile, 'w') as csvfile:
f = csv.writer(csvfile)
f.writerow(["Name", "Start", "End", "Elapsed"])
for hyper, h, hyper_filename in zip(hyperlist, list(range(len(hyperlist))), hyper_filenames):
hyper_output_group = []
for partition_row, i, row_filename in zip(partition_rows, list(range(len(partition_rows))), row_filenames):
cv_prefix = args.output_name + "-cv-" + str(h) + "-row-" + str(i)
cv_script = os.path.join("cv-scripts", cv_prefix + ".sh")
cv_scripts.append(cv_script)
cv_output = "hyper" + os.sep + cv_prefix + "-result.txt"
hyper_output_group.append(cv_output)
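# Each CV job fits only its own row partition with a single hyperparameter
# file; the per-partition results are merged later by integrate_hyper.py via
# set_hyper.sh below.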
command_string = "time python cross_validate.py -d " + data_file + " -lr " + str(args.load_reps) + " -o " + cv_output + " -hl " + str(hyper_filename) \
+ " -t " + args.test + " -l " + str(args.lag) + " -rl " + str(row_filename)
with open(cv_script, 'w') as outputfile:
outputfile.write("#!/bin/bash\n")
outputfile.write("START=$(date)\n")
#outputfile.write("module load python/2.7\n")
# outputfile.write("module load python/2.7/scipy-mkl\n")
# outputfile.write("module load python/2.7/numpy-mkl\n")
#outputfile.write("module load anaconda\n")
outputfile.write("module load anaconda3\n")
outputfile.write(command_string)
outputfile.write("\n")
outputfile.write("END=$(date)\n")
outputfile.write("echo " + cv_script + ",$START,$END,$SECONDS >> " + cvtimefile + "\n")
os.chmod(cv_script, 0o777)
# Set the output names, prepare for integration of all the hyper parameter fit results
hyper_output_dict[str(hyper)] = hyper_output_group
hyper_int_dict[str(hyper)] = "hyper" + os.sep + args.output_name + "-cv-" + str(h) + "-result.txt"
hyper_output_df = pd.DataFrame(hyper_output_dict)
hyper_int_df = pd.DataFrame(hyper_int_dict, index=[0])
print("Hyper output df is in form", hyper_output_df.head(n=5))
hyper_output_df.to_csv("cv_outputs.txt", sep="\t", index=0)
hyper_int_df.to_csv("cv_integrated.txt", sep="\t", index=0)
print("Partitioned CV fit_result_dfs in cv_outputs.txt", "Integrated CV fit_result_dfs in cv_integrated.txt")
with open("cv_script_list.txt", 'w') as outfile:
for cv_script in cv_scripts:
outfile.write(cv_script + "\n")
print("CV scripts written to cv_script_list.txt")
if args.parallel_num > 0:
print("Parallel Number (# processes per job): " + str(args.parallel_num))
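# Split the scripts into ceil(n_scripts / parallel_num) groups; each group is
# joined with " & " below so its members run concurrently within one job.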
script_groups = pj.partition_inputs(cv_scripts, number=int(math.ceil(len(cv_scripts) * 1.0/args.parallel_num)))
print("Number of script groups ", len(script_groups))
parallel_scripts = []
for i, script_group in zip(list(range(len(script_groups))), script_groups):
appended_script_filenames = ["./" + script_filename for script_filename in script_group]
parallel_script = " & ".join(appended_script_filenames)
parallel_scripts.append(parallel_script)
with open("cv_parallel_script_list.txt", 'w') as scriptfile:
for parallel_script in parallel_scripts:
scriptfile.write(parallel_script + "\n")
print("Parallel script list written to cv_parallel_script_list.txt")
# Integrate hyperparameters
# Begin whole normal fit
hyper_script = "set_hyper.sh"
with open(hyper_script, 'w') as outputfile:
outputfile.write("#!/bin/bash\n")
outputfile.write("START=$(date)\n")
outputfile.write("set -e\n")
outputfile.write("time python integrate_hyper.py -hfd cv_outputs.txt -ind cv_integrated.txt -hl " + args.hyper_list_file + "\n")
outputfile.write("time python set_hyper.py -ind cv_integrated.txt -r " + "hyper" + os.sep + "hyper_df.txt -o " + "hyper" +
os.sep + "best_hyper.p -hl " + args.hyper_list_file + " -tn " + args.test_name + " \n")
outputfile.write("END=$(date)\n")
outputfile.write("echo " + hyper_script + ",$START,$END,$SECONDS >> " + resulttimefile + "\n")
os.chmod(hyper_script, 0o777)
print("set_hyper.sh written")
print("*************")
print("FITTING")
print("*************")
# Run the actual fit
if not os.path.exists("fit"):
os.makedirs("fit")
if not os.path.exists("fit-scripts"):
os.makedirs("fit-scripts")
fittimefile = os.path.join("timing", "fit_time.csv")
if not os.path.exists(fittimefile):
with open(fittimefile, 'w') as csvfile:
f = csv.writer(csvfile)
f.writerow(["Name", "Start", "End", "Elapsed"])
fit_scripts = []
fit_output_prefixes = []
for partition_row, i, row_filename in zip(partition_rows, list(range(len(partition_rows))), row_filenames):
fit_prefix = args.output_name + "-fit-row-" + str(i)
fit_script = os.path.join("fit-scripts", fit_prefix + ".sh")
fit_scripts.append(fit_script)
fit_output_prefix = "fit" + os.sep + fit_prefix
fit_output_prefixes.append(fit_output_prefix)
command_string = "time python fit_all.py -d " + data_file + " -rd " + rand_data_file + " -lr " + str(args.load_reps) + \
" -o " + fit_output_prefix + " -bh " + \
"hyper" + os.sep + "best_hyper.p" + " -t " + args.test + " -l " + str(args.lag) + " -rl " + \
str(row_filename) + " -n " + args.null + " -oa " + str(args.only_array)
with open(fit_script, 'w') as outputfile:
outputfile.write("#!/bin/bash\n")
outputfile.write("START=$(date)\n")
#outputfile.write("module load python/2.7\n")
# outputfile.write("module load python/2.7/scipy-mkl\n")
# outputfile.write("module load python/2.7/numpy-mkl\n")
outputfile.write("module load anaconda3\n")
outputfile.write(command_string)
outputfile.write("\n")
outputfile.write("END=$(date)\n")
outputfile.write("echo " + fit_script + ",$START,$END,$SECONDS >> " + fittimefile + "\n")
os.chmod(fit_script, 0o777)
with open("fit_script_list.txt", 'w') as outfile:
for fit_script in fit_scripts:
outfile.write("./" + fit_script + "\n")
print("Fit scripts written to fit_script_list.txt")
if args.parallel_num > 0:
print("Parallel Number (# processes per job): " + str(args.parallel_num))
script_groups = pj.partition_inputs(fit_scripts, number=int(math.ceil(len(fit_scripts) * 1.0/args.parallel_num)))
print("Number of script groups ", len(script_groups))
parallel_scripts = []
for i, script_group in zip(list(range(len(script_groups))), script_groups):
appended_script_filenames = ["./" + script_filename for script_filename in script_group]
parallel_script = " & ".join(appended_script_filenames)
parallel_scripts.append(parallel_script)
with open("fit_parallel_script_list.txt", 'w') as scriptfile:
for parallel_script in parallel_scripts:
scriptfile.write(parallel_script + "\n")
print("Parallel script list written to fit_parallel_script_list.txt")
# Note the output files
fit_coefs = [fit_output_prefix + "_coefs.p" for fit_output_prefix in fit_output_prefixes]
fit_intercepts = [fit_output_prefix + "_intercepts.p" for fit_output_prefix in fit_output_prefixes]
fit_results = [fit_output_prefix + "_fit_result_df.txt" for fit_output_prefix in fit_output_prefixes]
fit_coefsr = [fit_output_prefix + "_coefsr.p" for fit_output_prefix in fit_output_prefixes]
# fit_interceptsr = [fit_output_prefix + "_interceptsr.p" for fit_output_prefix in fit_output_prefixes]
fit_resultsr = [fit_output_prefix + "_fit_result_dfr.txt" for fit_output_prefix in fit_output_prefixes]
fit_output_dict = collections.OrderedDict()
fit_output_dict["coef"] = fit_coefs
fit_output_dict["coefr"] = fit_coefsr
fit_output_dict["intercept"] = fit_intercepts
# fit_output_dict["interceptr"] = fit_interceptsr
output_matr_df = pd.DataFrame(fit_output_dict)
output_matr_df.to_csv("output_matr_list.txt", sep="\t", index=False)
print("Output matrices written to output_matr_list.txt")
int_matr_dict = collections.OrderedDict()
int_matr_dict["coef"] = "fit" + os.sep + args.output_name + "_coefs.p"
int_matr_dict["coefr"] = "fit" + os.sep + args.output_name + "_coefsr.p"
int_matr_dict["intercept"] = "fit" + os.sep + args.output_name + "_intercepts.p"
# int_matr_dict["interceptr"] = "fit" + os.sep + args.output_name + "_interceptsr.p"
int_matr_df = pd.DataFrame(int_matr_dict, index=[0])
int_matr_df.to_csv("int_matr_list.txt", sep="\t", index=False)
print("integrated matrices written to int_matr_list.txt")
fit_result_dict = collections.OrderedDict()
fit_result_dict["fit_result"] = fit_results
fit_result_dict["fit_resultr"] = fit_resultsr
output_df_df = pd.DataFrame(fit_result_dict)
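# A hedged sketch of how this prep script might be invoked (the script file
# name and argument values are placeholders, not taken from the source):
#   python prep_jobs_cv.py -d expr.txt -d2 rand_expr.txt -lr 0 -o run1 \
#       -hlf hyperlist.p -t l -tn lasso -l 2 -n g -sn 3 -p 4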
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# fluctmatch --- https://github.com/tclick/python-fluctmatch
# Copyright (c) 2013-2017 The fluctmatch Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the New BSD license.
#
# Please cite your use of fluctmatch in published work:
#
# <NAME>, <NAME>, and <NAME>.
# Calculation of Enzyme Fluctuograms from All-Atom Molecular Dynamics
# Simulation. Meth Enzymology. 578 (2016), 327-342,
# doi:10.1016/bs.mie.2016.05.024.
#
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from future.builtins import (
dict,
open,
range,
)
from future.utils import (
native_str, )
import logging
import time
from os import environ
import numpy as np
import pandas as pd
from MDAnalysis.lib import util
from MDAnalysis.topology import PSFParser
from MDAnalysis.topology.base import change_squash
from MDAnalysis.core.topologyattrs import (
Atomids, Atomnames, Atomtypes, Masses, Charges, Resids, Resnums, Resnames,
Segids, Bonds, Angles, Dihedrals, Impropers)
from MDAnalysis.core.topology import Topology
from fluctmatch.topology import base
logger = logging.getLogger("MDAnalysis.topology.PSF")
# Changed the segid squash_by to change_squash to prevent segment ID sorting.
class PSF36Parser(PSFParser.PSFParser):
"""Read topology information from a CHARMM/NAMD/XPLOR PSF_ file.
Creates a Topology with the following Attributes:
- ids
- names
- types
- masses
- charges
- resids
- resnames
- segids
- bonds
- angles
- dihedrals
- impropers
.. _PSF: http://www.charmm.org/documentation/c35b1/struct.html
"""
format = 'PSF'
def parse(self):
"""Parse PSF file into Topology
Returns
-------
MDAnalysis *Topology* object
"""
# Open and check psf validity
with open(self.filename, 'r') as psffile:
header = next(psffile)
if not header.startswith("PSF"):
err = ("{0} is not a valid PSF file (header = {1})"
"".format(self.filename, header))
logger.error(err)
raise ValueError(err)
header_flags = header[3:].split()
if "NAMD" in header_flags:
self._format = "NAMD" # NAMD/VMD
elif "EXT" in header_flags:
self._format = "EXTENDED" # CHARMM
else:
self._format = "STANDARD" # CHARMM
if "XPLOR" in header_flags:
self._format += "_XPLOR"
next(psffile)
title = next(psffile).split()
if not (title[1] == "!NTITLE"):
err = "{0} is not a valid PSF file".format(psffile.name)
logger.error(err)
raise ValueError(err)
# psfremarks = [psffile.next() for i in range(int(title[0]))]
for _ in range(int(title[0])):
next(psffile)
logger.info("PSF file {0}: format {1}"
"".format(psffile.name, self._format))
# Atoms first and mandatory
top = self._parse_sec(psffile, ('NATOM', 1, 1, self._parseatoms))
# Then possibly other sections
sections = (
# ("atoms", ("NATOM", 1, 1, self._parseatoms)),
(Bonds, ("NBOND", 2, 4, self._parsesection)),
(Angles, ("NTHETA", 3, 3, self._parsesection)),
(Dihedrals, ("NPHI", 4, 2, self._parsesection)),
(Impropers, ("NIMPHI", 4, 2, self._parsesection)),
# ("donors", ("NDON", 2, 4, self._parsesection)),
# ("acceptors", ("NACC", 2, 4, self._parsesection))
)
try:
for attr, info in sections:
next(psffile)
top.add_TopologyAttr(attr(self._parse_sec(psffile, info)))
except StopIteration:
# Reached the end of the file before we expected
pass
return top
def _parseatoms(self, lines, atoms_per, numlines):
"""Parses atom section in a Charmm PSF file.
Normal (standard) and extended (EXT) PSF format are
supported. CHEQ is supported in the sense that CHEQ data is simply
ignored.
CHARMM Format from ``source/psffres.src``:
CHEQ::
II,LSEGID,LRESID,LRES,TYPE(I),IAC(I),CG(I),AMASS(I),IMOVE(I),ECH(I),EHA(I)
standard format:
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2G14.6,I8,2G14.6)
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A4,1X,2G14.6,I8,2G14.6) XPLOR
expanded format EXT:
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2G14.6,I8,2G14.6)
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A4,1X,2G14.6,I8,2G14.6) XPLOR
no CHEQ::
II,LSEGID,LRESID,LRES,TYPE(I),IAC(I),CG(I),AMASS(I),IMOVE(I)
standard format:
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2G14.6,I8)
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A4,1X,2G14.6,I8) XPLOR
expanded format EXT:
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2G14.6,I8)
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A4,1X,2G14.6,I8) XPLOR
NAMD PSF
space separated, see release notes for VMD 1.9.1, psfplugin at
http://www.ks.uiuc.edu/Research/vmd/current/devel.html :
psfplugin: Added more logic to the PSF plugin to determine cases where
the CHARMM "EXTended" PSF format cannot accomodate long atom types, and
we add a "NAMD" keyword to the PSF file flags line at the top of the
file. Upon reading, if we detect the "NAMD" flag there, we know that it
is possible to parse the file correctly using a simple space-delimited
scanf() format string, and we use that strategy rather than holding to
the inflexible column-based fields that are a necessity for
compatibility with CHARMM, CNS, X-PLOR, and other formats. NAMD and the
psfgen plugin already assume this sort of space-delimited formatting,
but that's because they aren't expected to parse the PSF variants
associated with the other programs. For the VMD PSF plugin, having the
"NAMD" tag in the flags line makes it absolutely clear that we're
dealing with a NAMD-specific file so we can take the same approach.
"""
# how to partition the line into the individual atom components
atom_parsers = dict(
STANDARD="I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2F14.6,I8",
STANDARD_XPLOR="I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A4,1X,2F14.6,I8",
EXTENDED="I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2F14.6,I8",
EXTENDED_XPLOR="I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A6,1X,2F14.6,I8",
NAMD="I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2F14.6,I8",
)
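# util.FORTRANReader interprets these Fortran edit descriptors and splits each
# fixed-width atom line into one value per field, in the order listed above.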
atom_parser = util.FORTRANReader(atom_parsers[self._format])
# Allocate arrays
atomids = np.zeros(numlines, dtype=np.int32)
segids = np.zeros(numlines, dtype=object)
resids = np.zeros(numlines, dtype=np.int32)
resnames = np.zeros(numlines, dtype=object)
atomnames = np.zeros(numlines, dtype=object)
atomtypes = np.zeros(numlines, dtype=object)
charges = np.zeros(numlines, dtype=np.float32)
masses = np.zeros(numlines, dtype=np.float64)
for i in range(numlines):
try:
line = lines()
except StopIteration:
err = "{0} is not a valid PSF file".format(self.filename)
logger.error(err)
raise ValueError(err)
try:
vals = atom_parser.read(line)
except ValueError:
# last ditch attempt: this *might* be a NAMD/VMD
# space-separated "PSF" file from VMD version < 1.9.1
try:
atom_parser = util.FORTRANReader(atom_parsers['NAMD'])
vals = atom_parser.read(line)
logger.warn("Guessing that this is actually a"
" NAMD-type PSF file..."
" continuing with fingers crossed!")
logger.info("First NAMD-type line: {0}: {1}"
"".format(i, line.rstrip()))
except ValueError:
atom_parser = util.FORTRANReader(
atom_parsers[self._format].replace("A6", "A4"))
vals = atom_parser.read(line)
logger.warn("Guessing that this is actually a"
" pre CHARMM36 PSF file..."
" continuing with fingers crossed!")
logger.info("First NAMD-type line: {0}: {1}"
"".format(i, line.rstrip()))
atomids[i] = vals[0]
segids[i] = vals[1] if vals[1] else "SYSTEM"
resids[i] = vals[2]
resnames[i] = vals[3]
atomnames[i] = vals[4]
atomtypes[i] = vals[5]
charges[i] = vals[6]
masses[i] = vals[7]
# Atom
atomids = Atomids(atomids - 1)
atomnames = Atomnames(atomnames)
atomtypes = Atomtypes(atomtypes)
charges = Charges(charges)
masses = Masses(masses)
# Residue
# resids, resnames
residx, (new_resids, new_resnames, perres_segids) = change_squash(
(resids, resnames, segids), (resids, resnames, segids))
# transform from atom:Rid to atom:Rix
residueids = Resids(new_resids)
residuenums = Resnums(new_resids.copy())
residuenames = Resnames(new_resnames)
# Segment
segidx, (perseg_segids, ) = change_squash((perres_segids, ),
(perres_segids, ))
segids = Segids(perseg_segids)
top = Topology(
len(atomids),
len(new_resids),
len(segids),
attrs=[
atomids, atomnames, atomtypes, charges, masses, residueids,
residuenums, residuenames, segids
],
atom_resindex=residx,
residue_segindex=segidx)
return top
class PSFWriter(base.TopologyWriterBase):
"""PSF writer that implements the CHARMM PSF topology format.
Requires the following attributes to be present:
- ids
- names
- types
- masses
- charges
- resids
- resnames
- segids
- bonds
.. versionchanged:: 3.0.0
Uses numpy arrays for bond, angle, dihedral, and improper outputs.
Parameters
----------
filename : str or :class:`~MDAnalysis.lib.util.NamedStream`
name of the output file or a stream
n_atoms : int, optional
The number of atoms in the output trajectory.
extended
extended format
cmap
include CMAP section
cheq
include charge equilibration
title
title lines at beginning of the file
charmm_version
Version of CHARMM for formatting (default: 41)
"""
format = "PSF"
units = dict(time=None, length=None)
_fmt = dict(
STD="%8d %-4s %-4d %-4s %-4s %4d %14.6f%14.6f%8d",
STD_XPLOR="%8d %4s %-4d %-4s %-4s %-4s %14.6f%14.6f%8d",
STD_XPLOR_C35="%4d %-4s %-4d %-4s %-4s %-4s %14.6f%14.6f%8d",
EXT="%10d %-8s %8d %-8s %-8s %4d %14.6f%14.6f%8d",
EXT_XPLOR="%10d %-8s %-8d %-8s %-8s %-6s %14.6f%14.6f%8d",
EXT_XPLOR_C35="%10d %-8s %-8d %-8s %-8s %-4s %14.6f%14.6f%8d")
def __init__(self, filename, **kwargs):
self.filename = util.filename(filename, ext="psf")
self._extended = kwargs.get("extended", True)
self._cmap = kwargs.get("cmap", True)
self._cheq = kwargs.get("cheq", True)
self._version = kwargs.get("charmm_version", 41)
self._universe = None
self._fmtkey = "EXT" if self._extended else "STD"
date = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
user = environ["USER"]
self._title = kwargs.get(
"title", (
"* Created by fluctmatch on {date}".format(date=date),
"* User: {user}".format(user=user),
))
if not util.iterable(self._title):
self._title = util.asiterable(self._title)
self.col_width = 10 if self._extended else 8
self.sect_hdr = "{:>10d} !{}" if self._extended else "{:>8d} !{}"
self.sect_hdr2 = ("{:>10d}{:>10d} !{}"
if self._extended else "{:>8d}{:>8d} !{}")
self.sections = (
("bonds", "NBOND: bonds\n", 8),
("angles", "NTHETA: angles\n", 9),
("dihedrals", "NPHI: dihedrals\n", 8),
("impropers", "NIMPHI: impropers\n", 8),
("donors", "NDON: donors\n", 8),
("acceptors", "NACC: acceptors\n", 8),
)
def write(self, universe):
"""Write universe to PSF format.
Parameters
----------
universe : :class:`~MDAnalysis.Universe` or :class:`~MDAnalysis.AtomGroup`
A collection of atoms in a universe or atomgroup with bond
definitions.
"""
self._universe = universe
xplor = not np.issubdtype(universe.atoms.types.dtype, np.signedinteger)
header = "PSF"
if self._extended:
header += " EXT"
if self._cheq:
header += " CHEQ"
if xplor:
header += " XPLOR"
if self._cmap:
header += " CMAP"
header += "\n"
if xplor:
self._fmtkey += "_XPLOR"
if self._version < 36:
self._fmtkey += "_C35"
with open(self.filename, "wb") as psffile:
psffile.write(header.encode())
psffile.write("\n".encode())
n_title = len(self._title)
psffile.write(self.sect_hdr.format(n_title, "NTITLE").encode())
psffile.write("\n".encode())
for _ in self._title:
psffile.write(_.encode())
psffile.write("\n".encode())
psffile.write("\n".encode())
self._write_atoms(psffile)
for section in self.sections:
self._write_sec(psffile, section)
self._write_other(psffile)
def _write_atoms(self, psffile):
"""Write atom section in a Charmm PSF file.
Normal (standard) and extended (EXT) PSF format are
supported.
CHARMM Format from ``source/psffres.src``:
no CHEQ::
standard format:
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2G14.6,I8)
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2G14.6,I8,2G14.6) CHEQ
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A6,1X,2G14.6,I8) XPLOR
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A6,1X,2G14.6,I8,2G14.6) XPLOR,CHEQ
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A4,1X,2G14.6,I8) XPLOR,c35
(I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A4,1X,2G14.6,I8,2G14.6) XPLOR,c35,CHEQ
expanded format EXT:
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2G14.6,I8)
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2G14.6,I8,2G14.6) CHEQ
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A6,1X,2G14.6,I8) XPLOR
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A6,1X,2G14.6,I8,2G14.6) XPLOR,CHEQ
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A4,1X,2G14.6,I8) XPLOR,c35
(I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A4,1X,2G14.6,I8,2G14.6) XPLOR,c35,CHEQ
"""
fmt = self._fmt[self._fmtkey]
psffile.write(
self.sect_hdr.format(self._universe.atoms.n_atoms,
"NATOM").encode())
psffile.write("\n".encode())
atoms = self._universe.atoms
lines = (np.arange(atoms.n_atoms) + 1, atoms.segids, atoms.resids,
atoms.resnames, atoms.names, atoms.types, atoms.charges,
atoms.masses, np.zeros_like(atoms.ids))
lines = pd.concat([pd.DataFrame(_) for _ in lines], axis=1)
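# A hedged usage sketch for PSFWriter (the file names are placeholders, not an
# example from the source):
#   import MDAnalysis as mda
#   u = mda.Universe("system.psf", "system.dcd")
#   PSFWriter("fluct.psf", charmm_version=36).write(u)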
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 11:18:37 2021
@author: <NAME>
INFO:
This script is run to extract variables of interest in a MIDAS AWS data file
It creates a standardized data structure used in the UHA analysis and in the script MIDAS_DataTreatment_and_WindRoses_ERL.py
"""
import pandas as pd
import os.path
city = 'London'
years = [2015, 2016, 2017, 2018, 2019, 2020]
datadir = '' + city + '/' ### Directory where the MIDAS station files have been downloaded
savedir = '' + city + '/Filtered/' ### Directory where the newly organized data will be stored
def find_csv_filenames(path_to_dir, suffix=".csv"):
filenames = os.listdir(path_to_dir)
return [ filename for filename in filenames if filename.endswith( suffix ) ]
list_of_weather_stations_f = find_csv_filenames(datadir)
f_info_name_out = 'MIDAS_MetaData_' + city + '.csv'
info_length = 279
var_names_loc = info_length + 1
var_interest = ['id', 'air_temperature', 'wind_direction', 'wind_speed', 'wind_speed_unit_id', 'rltv_hum']
for yr in years:
list_of_weather_stations_f_year = list(filter(lambda x: x.endswith(str(yr) + '.csv'),
list_of_weather_stations_f))
period_interest = pd.date_range(start='01-01-' + str(yr), end='01-01-' + str(yr + 1),
freq='1H', closed='left')
for aws in list_of_weather_stations_f_year:
info_aws = pd.read_csv(datadir + aws, skiprows=0, nrows=info_length, index_col=False, header=None)
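# The first info_length rows of each MIDAS file hold station metadata; the
# hourly observations presumably begin around row var_names_loc and would be
# read in the rest of this loop.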
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)