| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
"""
.. _example5:
Fifth Example: Demultiplexer - Multiplexer
-----------------------------------------------
An imaginative layout using a classifier to predict the cluster labels and fitting a separate model for each cluster.
Steps of the **PipeGraph**:
- **scaler**: A :class:`MinMaxScaler` data preprocessor
- **classifier**: A :class:`GaussianMixture` classifier
- **demux**: A custom :class:`Demultiplexer` class in charge of splitting the input arrays according to the selection input vector
- **lm_0**: A :class:`LinearRegression` model
- **lm_1**: A :class:`LinearRegression` model
- **lm_2**: A :class:`LinearRegression` model
- **mux**: A custom :class:`Multiplexer` class in charge of combining different input arrays into a single one according to the selection input vector
.. figure:: https://raw.githubusercontent.com/mcasl/PipeGraph/master/examples/images/Diapositiva5.png
Figure 1. PipeGraph diagram showing the steps and their connections
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from pipegraph.base import PipeGraph, Demultiplexer, Multiplexer
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
X_first = pd.Series(np.random.rand(100,))
y_first = pd.Series(4 * X_first + 0.5*np.random.randn(100,))
X_second = pd.Series(np.random.rand(100,) + 3)
y_second = pd.Series(-4 * X_second + 0.5*np.random.randn(100,))
X_third = pd.Series(np.random.rand(100,) + 6)
y_third = pd.Series(2 * X_third + 0.5*np.random.randn(100,))
X = pd.concat([X_first, X_second, X_third], axis=0).to_frame()
y = | pd.concat([y_first, y_second, y_third], axis=0) | pandas.concat |
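# --- Hedged sketch (not the PipeGraph API): the demultiplexer/multiplexer
# layout described in the docstring above, written with plain scikit-learn.
# A GaussianMixture assigns a cluster label to every sample, the samples are
# split ("demultiplexed") by label, one LinearRegression is fitted per
# cluster, and the per-cluster predictions are written back in the original
# order ("multiplexed"). The function name and the 3-component choice are
# illustrative assumptions that mirror the synthetic data above.
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.mixture import GaussianMixture
from sklearn.linear_model import LinearRegression

def demux_mux_fit_predict(X, y, n_components=3):
    X_scaled = MinMaxScaler().fit_transform(X)
    labels = GaussianMixture(n_components=n_components, random_state=0).fit_predict(X_scaled)
    y_pred = np.empty(len(y), dtype=float)
    for label in range(n_components):
        mask = labels == label                        # demux: keep one cluster only
        model = LinearRegression().fit(X_scaled[mask], y[mask])
        y_pred[mask] = model.predict(X_scaled[mask])  # mux: reassemble by position
    return y_pred

# e.g. y_hat = demux_mux_fit_predict(X.to_numpy(), y.to_numpy())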
import pandas as pd
import numpy as np
from pathlib import Path
from compositions import *
RELMASSS_UNITS = {
'%': 10**-2,
'wt%': 10**-2,
'ppm': 10**-6,
'ppb': 10**-9,
'ppt': 10**-12,
'ppq': 10**-15,
}
def scale_function(in_unit, target_unit='ppm'):
if not pd.isna(in_unit):
return RELMASSS_UNITS[in_unit.lower()] / \
RELMASSS_UNITS[target_unit.lower()]
else:
return 1.
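# Quick usage check for scale_function (a sketch; math.isclose avoids exact
# float comparison of the unit factors defined above).
import math
assert math.isclose(scale_function('wt%', target_unit='ppm'), 1e4)   # wt% -> ppm
assert math.isclose(scale_function('ppb', target_unit='ppm'), 1e-3)  # ppb -> ppm
assert scale_function(None) == 1.  # missing unit -> neutral scale factor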
class RefComp(object):
"""
Reference compositional model object, principally used for normalisation.
"""
def __init__(self, filename, **kwargs):
self.data = pd.read_csv(filename, **kwargs)
self.data = self.data.set_index('var')
self.original_data = self.data.copy() # preserve unaltered record
self.add_oxides()
self.collect_vars()
self.set_units()
def add_oxides(self):
"""
Compositional models typically include elements in both oxide and elemental form,
usually divided into 'majors' and 'traces'.
For the purposes of normalisation we need:
i) to be able to access values for the form found in the sample dataset,
ii) for original values and uncertainties to be preserved, and
iii) for closure to be preserved.
There are multiple ways to achieve this - one is to create linked element-oxide tables,
and another is to force working in one format (e.g. Al2O3 (wt%) --> Al (ppm)).
"""
pass
def collect_vars(self,
headers=['Reservoir', 'Reference', 'ModelName', 'ModelType'],
floatvars=['value', 'unc_2sigma', 'constraint_value']):
self.vars = [i for i in self.data.index if (not | pd.isna(self.data.loc[i, 'value']) | pandas.isna |
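# --- Hedged sketch of the oxide -> element conversion that add_oxides()
# leaves unimplemented above (e.g. Al2O3 (wt%) --> Al (ppm)). The molar
# masses are standard values; the function name and the call below are
# illustrative and not part of the RefComp API.
def oxide_to_element_ppm(oxide_wt_percent, element_mass, oxide_mass, cation_count):
    """Convert an oxide concentration in wt% to the element concentration in ppm."""
    element_fraction = cation_count * element_mass / oxide_mass  # mass fraction of the cation in the oxide
    return oxide_wt_percent * element_fraction * 10**4           # wt% -> ppm is a factor of 1e4

# 15.4 wt% Al2O3 corresponds to roughly 81,500 ppm Al:
al_ppm = oxide_to_element_ppm(15.4, element_mass=26.982, oxide_mass=101.961, cation_count=2)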
import matplotlib
import pandas as pd
import numpy as np
import cvxpy as cp
from cvxopt import matrix, solvers
import pickle
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
from colorama import Fore
from config import RISK_FREE_RATE, DATAPATH, EXPECTED_RETURN, STOCKS_NUMBER, MONTO_CARLO_TIMES
class InvestmentStrategy:
@staticmethod
def process_data_x_matrix(datapath):
df_raw = pd.read_excel(datapath)
df_raw = df_raw.T
df_raw = df_raw.drop(index=['code', 'name'], columns=[0])
df_raw = df_raw.fillna(method='ffill')
# the 32nd stock is missing from its very first day, so fill it with backfill as well
df = df_raw.fillna(method='backfill')
return df
@staticmethod
def process_data_contain_hs300(datapath):
df_raw = pd.read_excel(datapath)
df_raw = df_raw.T
df_raw = df_raw.drop(index=['code', 'name'])
# df_raw.to_excel("./test1.xlsx")
# print(df_raw.isnull().any())
df_raw = df_raw.fillna(method='ffill')
# the 32nd stock is missing from its very first day, so fill it with backfill as well
df = df_raw.fillna(method='backfill')
return df
@staticmethod
def day_yield_compute(x_matrix):
day_yield = (x_matrix.shift(-1) - x_matrix) / x_matrix
return day_yield.iloc[:-1, :]
@staticmethod
def ex_vector_compute(x_matrix):
day_yield = (x_matrix.shift(-1) - x_matrix) / x_matrix
day_avg_yield = day_yield.mean().to_numpy()
return day_yield.iloc[:-1, :], day_avg_yield
@staticmethod
def ex_matrix_compute(x_matrix, ex_numpy_vector):
ex_np = np.repeat(np.expand_dims(ex_numpy_vector, axis=0), x_matrix.shape[0], axis=0)
ex_matrix = pd.DataFrame(ex_np, index=x_matrix.index, columns=x_matrix.columns)
return ex_matrix
@staticmethod
def cov_matrix_compute(x_ex_matrix):
return np.matmul(x_ex_matrix.T.to_numpy(), x_ex_matrix.to_numpy()) / (x_ex_matrix.shape[0] - 1)
def compute_weight(self, x_matrix, total_days=252, method="Markowitz", starttime=0, endtime=0):
# ex_numpy_vector is r-bar, a (50,) numpy array
# x_matrix is the matrix X [rows = days in the 6-month window x 50 columns]; e.g. the first weight computation uses (1212, 50)
# ex_matrix is the EX matrix [rows = days in the 6-month window x 50 columns]
# x_ex_matrix is the matrix X - EX
# covariance matrix: cov, shape (50, 50)
total_days_every_year = total_days / 5
day_yield_matrix, ex_numpy_vector = self.ex_vector_compute(x_matrix)
ex_matrix = self.ex_matrix_compute(day_yield_matrix, ex_numpy_vector)
x_ex_matrix = day_yield_matrix - ex_matrix
cov_matrix_numpy = self.cov_matrix_compute(x_ex_matrix)
# stocks_number = 50
n = STOCKS_NUMBER
one_matrix = np.ones((1, n))
'''
# the cvxopt package could also solve this
P = matrix(cov_matrix_numpy.tolist())
# print(P)
# print('*' * 50)
q = matrix([0.0] * 50)
# G = matrix([[-1.0, 0.0], [0.0, -1.0]])
# h = matrix([0.0, 0.0])
A = matrix(np.vstack((ex_numpy_vector, one_matrix)))  # prototype is cvxopt.matrix(array, dims); equivalent to A = matrix([[1.0],[1.0]])
# print(A)
b = matrix([0.1, 1.0])
result = solvers.qp(P=P, q=q, A=A, b=b)
print(result)
print(result['x'])
'''
if method == "Markowitz":
print("\033[0;36;m 开始计算组合权重,采用策略:\033[0m \033[0;34;m Markowitz投资组合 \033[0m")
# print("\033[0;36;m 开始求解二次规划:\033[0m")
annual_yield_vector = ex_numpy_vector * total_days_every_year
w = cp.Variable(n)
prob = cp.Problem(cp.Minimize((1 / 2) * cp.quad_form(w, cov_matrix_numpy)),
[annual_yield_vector.T @ w == EXPECTED_RETURN,
one_matrix @ w == 1])
prob.solve()
# print("\nThe optimal value is", prob.value)
# # print("A solution w is")
# # print(w.value)
print("\033[0;36;m 完成Markowitz投资组合最优权重二次规划求解,方差最优值为:\033[0m \033[0;34;m {} \033[0m".format(prob.value))
return w.value
r_p_list = []
sigma_p_list = []
sharpe_ratio_list = []
weight_list = []
if method == "MontoCarlo":
print("\033[0;36;m 开始计算组合权重,采用策略:\033[0m \033[0;34;m Monto Carlo 求解最大夏普比率 \033[0m")
# 正态分布均值设置为 1 / 50 更符合
np.random.seed(1)
risk_free_rate_day = RISK_FREE_RATE / total_days_every_year
bar = tqdm(list(range(int(MONTO_CARLO_TIMES))),
bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
for _ in bar:
# bar.set_description(f"现在到Monto Carlo第{_}次")
weights = np.random.normal(1 / n, 1.0, n - 1)
weights_last = 1 - np.sum(weights)
weights = np.append(weights, weights_last)
weights_row_vector = np.expand_dims(weights, axis=0)
yield_avg_vector = np.expand_dims(ex_numpy_vector, axis=0)
sigma_p = np.sqrt(np.matmul(np.matmul(weights_row_vector, cov_matrix_numpy), weights_row_vector.T))[0][
0]
r_p = np.matmul(weights_row_vector, yield_avg_vector.T)[0][0]
sharpe_ratio = (r_p - risk_free_rate_day) / sigma_p
r_p_list.append(r_p)
sigma_p_list.append(sigma_p)
sharpe_ratio_list.append(sharpe_ratio)
weight_list.append(weights)
r_p_list_numpy = np.array(r_p_list)
sigma_p_list_numpy = np.array(sigma_p_list)
sharpe_ratio_list_numpy = np.array(sharpe_ratio_list)
weight_list_numpy = np.array(weight_list)
# maximum Sharpe ratio
max_sharpe_ratio = np.max(sharpe_ratio_list_numpy)
max_sharpe_ratio_index = np.argmax(sharpe_ratio_list_numpy)
# the corresponding standard deviation and mean
sigma_rp = [sigma_p_list_numpy[max_sharpe_ratio_index], r_p_list_numpy[max_sharpe_ratio_index]]
# Combine r_p with the risk-free rate to reach the 10% return target; alpha is the weight invested at the risk-free rate. In practice alpha ends up close to 97%, because the maximum-Sharpe market portfolio here has a daily return above 10%, far away from the 10% annual target return and the 3% annual risk-free rate
alpha = (EXPECTED_RETURN / total_days_every_year - sigma_rp[1]) / (risk_free_rate_day - sigma_rp[1])
weight_list_numpy_opt_alpha = np.append(weight_list_numpy[max_sharpe_ratio_index], alpha)
print("\033[0;36;m 完成 Monto Carlo 策略权重求解 \033[0m")
# 作图
filename = os.path.join(os.getcwd(), 'images')
if not os.path.exists(filename):
os.makedirs(filename)
plt.figure(figsize=(8, 6))
plt.style.use('seaborn-dark')
plt.rcParams['savefig.dpi'] = 300  # saved-figure DPI
plt.rcParams['figure.dpi'] = 300  # display DPI
plt.rcParams['font.sans-serif'] = ['SimHei']  # font that also renders CJK characters in labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with this font
plt.scatter(sigma_p_list_numpy, r_p_list_numpy, c=r_p_list_numpy / sigma_p_list_numpy,
marker='o', cmap='coolwarm')
plt.plot([0, sigma_rp[0]], [risk_free_rate_day, sigma_rp[1]], 'r')
# plt.annotate('max Sharpe ratio:'.format(max_sharpe_ratio), xy=rp_sigma, xytext=(3, 1.5),
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
plt.annotate('max Sharpe ratio:{}'.format(max_sharpe_ratio), xy=sigma_rp)
plt.xlabel('Daily standard deviation')
plt.ylabel('Daily return')
plt.colorbar(label='Sharpe ratio')
plt.title("CAL and efficient frontier from {} Monte Carlo samples".format(MONTO_CARLO_TIMES))
plt.savefig("./images/Montacarlo_CAL_{}_{}_{}".format(MONTO_CARLO_TIMES, starttime, endtime), dpi=300)
print("\033[0;36;m Finished plotting the capital market line \033[0m")
return weight_list_numpy_opt_alpha
@staticmethod
def get_six_month_map(x_matrix):
dfx = pd.DataFrame(x_matrix.index, columns=['time'])
dfx["year"] = pd.to_datetime(pd.DataFrame(x_matrix.index, columns=['time'])['time'], format='%Y-%m-%d').dt.year
dfx["month"] = pd.to_datetime(pd.DataFrame(x_matrix.index, columns=['time'])['time'],
format='%Y-%m-%d').dt.month
dfx['yearmonth'] = dfx.apply(lambda r: r['time'][:-2], axis=1)
dfx = dfx.drop_duplicates(['yearmonth'])
index_six_month = dfx[(dfx['month'] == 1) | (dfx['month'] == 7)].index.tolist()
index_slice = int(len(index_six_month) / 2)
compare_list1 = index_six_month[index_slice:]
compare_list2 = compare_list1[1:]
compare_list2.append(x_matrix.shape[0])
compare_list = list(zip(compare_list1, compare_list2))
six_map = {k: v for k, v in zip(index_six_month[index_slice:], index_six_month[:index_slice])}
return six_map, compare_list
def save_weights_markowitz(self):
x_matrix_total = self.process_data_x_matrix(DATAPATH)
six_map, compare_list = self.get_six_month_map(x_matrix_total)
weight_list = []
bar = tqdm(six_map.items(), bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
for k, v in bar:
start_time = x_matrix_total.iloc[v:k, :].index[0]
end_time = x_matrix_total.iloc[v:k, :].index[-1]
bar.set_description(f"进入{start_time}--{end_time}权重计算")
df_weight = x_matrix_total.iloc[v:k, :]
total_days = k - v
weight = self.compute_weight(df_weight, total_days)
weight_list.append(weight)
# save the weights
filename = os.path.join(os.getcwd(), 'weights')
if not os.path.exists(filename):
os.makedirs(filename)
with open('./weights/weights_Markowitz.pickle', 'wb') as f:
pickle.dump(weight_list, f)
with open('./weights/weights_Markowitz.txt', 'w') as f2:
f2.write(str(weight_list))
print("\033[0;36;m 权重保存完毕 \033[0m")
def save_weights_montocarlo(self):
x_matrix_total = self.process_data_x_matrix(DATAPATH)
six_map, compare_list = self.get_six_month_map(x_matrix_total)
weight_list = []
bar = tqdm(six_map.items(), bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
for k, v in bar:
df_weight = x_matrix_total.iloc[v:k, :]
total_days = k - v
start_time = x_matrix_total.iloc[v:k, :].index[0]
end_time = x_matrix_total.iloc[v:k, :].index[-1]
bar.set_description(f"进入{start_time}--{end_time}权重计算")
weight = self.compute_weight(df_weight, total_days, method="MontoCarlo", starttime=start_time,
endtime=end_time)
weight_list.append(weight)
# save the weights
filename = os.path.join(os.getcwd(), 'weights')
if not os.path.exists(filename):
os.makedirs(filename)
with open('./weights/weights_MontoCarlo.pickle', 'wb') as f:
pickle.dump(weight_list, f)
with open('./weights/weights_MontoCarlo.txt', 'w') as f2:
f2.write(str(weight_list))
print("\033[0;36;m 权重保存完毕 \033[0m")
def compare_performance(self, method="Markowitz"):
print("\033[0;36;m 开始与HS300表现比较,比较策略为 \033[0m \033[0;34;m {} \033[0m".format(method))
total_compare_matrix = pd.DataFrame(columns=['HS300', 'Portfolio', "Period"])
total_compare_matrix_convert_one = | pd.DataFrame(columns=['HS300', 'Portfolio', "Period"]) | pandas.DataFrame |
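# --- Hedged, self-contained sketch of the Markowitz step used in
# compute_weight() above: minimise portfolio variance subject to hitting a
# target return with fully-invested weights. The three assets and their
# returns/covariance are synthetic and purely illustrative.
import numpy as np
import cvxpy as cp

mu = np.array([0.08, 0.12, 0.10])            # expected annual returns
cov = np.array([[0.10, 0.02, 0.04],
                [0.02, 0.08, 0.01],
                [0.04, 0.01, 0.09]])          # annualised covariance matrix
target_return = 0.10

w = cp.Variable(3)
problem = cp.Problem(cp.Minimize(0.5 * cp.quad_form(w, cov)),
                     [mu @ w == target_return,   # hit the target return
                      cp.sum(w) == 1])           # fully invested
problem.solve()
# problem.value is the minimised variance, w.value the optimal weight vector.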
"""Tests for piece.py"""
from fractions import Fraction
import pandas as pd
import numpy as np
from harmonic_inference.data.data_types import KeyMode, PitchType
from harmonic_inference.data.piece import Note, Key, Chord, ScorePiece, get_reduction_mask
import harmonic_inference.utils.harmonic_constants as hc
import harmonic_inference.utils.rhythmic_utils as ru
import harmonic_inference.utils.harmonic_utils as hu
def test_note_from_series():
def check_equals(note_dict, note, measures_df, pitch_type):
assert pitch_type == note.pitch_type
if pitch_type == PitchType.MIDI:
assert (note_dict['midi'] % hc.NUM_PITCHES[PitchType.MIDI]) == note.pitch_class
else:
assert note.pitch_class == note_dict['tpc'] + hc.TPC_C
assert note.octave == note_dict['midi'] // hc.NUM_PITCHES[PitchType.MIDI]
assert note.onset == (note_dict['mc'], note_dict['onset'])
assert note.offset == (note_dict['offset_mc'], note_dict['offset_beat'])
assert note.duration == note_dict['duration']
assert note.onset_level == ru.get_metrical_level(
note_dict['onset'],
measures_df.loc[measures_df['mc'] == note_dict['mc']].squeeze(),
)
assert note.offset_level == ru.get_metrical_level(
note_dict['offset_beat'],
measures_df.loc[measures_df['mc'] == note_dict['offset_mc']].squeeze(),
)
note_dict = {
'midi': 50,
'tpc': 5,
'mc': 1,
'onset': Fraction(1, 2),
'offset_mc': 2,
'offset_beat': Fraction(3, 4),
'duration': Fraction(5, 6),
}
key_values = {
'midi': range(127),
'tpc': range(-hc.TPC_C, hc.TPC_C),
'mc': range(3),
'onset': [i * Fraction(1, 2) for i in range(3)],
'offset_mc': range(3),
'offset_beat': [i * Fraction(1, 2) for i in range(3)],
'duration': [i * Fraction(1, 2) for i in range(3)],
}
measures_df = pd.DataFrame({
'mc': list(range(10)),
'timesig': '12/8'
})
for key, values in key_values.items():
for value in values:
note_dict[key] = value
note_series = pd.Series(note_dict)
note = Note.from_series(note_series, measures_df, PitchType.MIDI)
check_equals(note_dict, note, measures_df, PitchType.MIDI)
note = Note.from_series(note_series, measures_df, PitchType.TPC)
check_equals(note_dict, note, measures_df, PitchType.TPC)
note_dict['tpc'] = hc.NUM_PITCHES[PitchType.TPC] - hc.TPC_C
assert Note.from_series(pd.Series(note_dict), measures_df, PitchType.TPC) is None
note_dict['tpc'] = 0 - hc.TPC_C - 1
assert Note.from_series(pd.Series(note_dict), measures_df, PitchType.TPC) is None
def test_chord_from_series():
def check_equals(chord_dict, chord, measures_df, pitch_type, key):
assert chord.pitch_type == pitch_type
assert chord.chord_type == hu.get_chord_type_from_string(chord_dict['chord_type'])
assert chord.inversion == hu.get_chord_inversion(chord_dict['figbass'])
assert chord.onset == (chord_dict['mc'], chord_dict['onset'])
assert chord.offset == (chord_dict['mc_next'], chord_dict['onset_next'])
assert chord.duration == chord_dict['duration']
assert chord.onset_level == ru.get_metrical_level(
chord_dict['onset'],
measures_df.loc[measures_df["mc"] == chord_dict["mc"]].squeeze(),
)
assert chord.offset_level == ru.get_metrical_level(
chord_dict['onset_next'],
measures_df.loc[measures_df["mc"] == chord_dict["mc_next"]].squeeze(),
)
root = chord_dict['root']
if pitch_type == PitchType.MIDI:
root = hu.tpc_interval_to_midi_interval(root)
assert chord.root == hu.transpose_pitch(key.local_tonic, root, pitch_type)
assert chord.bass == hu.get_bass_note(
chord.chord_type,
chord.root,
chord.inversion,
chord.pitch_type,
)
chord_dict = {
'numeral': 'III',
'root': 5,
'bass_note': 5,
'chord_type': 'M',
'figbass': '',
'globalkey': 'A',
'globalkey_is_minor': False,
'localkey': 'iii',
'localkey_is_minor': True,
'relativeroot': pd.NA,
'offset_mc': 2,
'offset_beat': Fraction(3, 4),
'duration': Fraction(5, 6),
'mc': 1,
'onset': Fraction(1, 2),
'mc_next': 2,
'onset_next': Fraction(3, 4),
}
key_values = {
'root': range(-2, 2),
'bass_note': range(-7, 7),
'chord_type': hc.STRING_TO_CHORD_TYPE.keys(),
'figbass': hc.FIGBASS_INVERSIONS.keys(),
'mc': range(3),
'onset': [i * Fraction(1, 2) for i in range(3)],
'mc_next': range(3),
'onset_next': [i * Fraction(1, 2) for i in range(3)],
'duration': [i * Fraction(1, 2) for i in range(3)],
}
measures_df = pd.DataFrame({
'mc': list(range(10)),
'timesig': '12/8'
})
for key, values in key_values.items():
for value in values:
chord_dict[key] = value
chord_series = pd.Series(chord_dict)
for pitch_type in PitchType:
chord = Chord.from_series(chord_series, measures_df, pitch_type)
local_key = Key.from_series(chord_series, pitch_type)
check_equals(chord_dict, chord, measures_df, pitch_type, local_key)
# @none returns None
for numeral in ['@none', pd.NA]:
chord_dict['numeral'] = numeral
chord_series = pd.Series(chord_dict)
for pitch_type in PitchType:
assert Chord.from_series(chord_series, measures_df, pitch_type) is None
chord_dict['numeral'] = 'III'
# Bad key returns None
chord_dict['localkey'] = 'Error'
chord_series = pd.Series(chord_dict)
for pitch_type in PitchType:
assert Chord.from_series(chord_series, measures_df, pitch_type) is None
chord_dict['localkey'] = 'iii'
# Bad relativeroot is not ok
chord_dict['relativeroot'] = 'Error'
chord_series = pd.Series(chord_dict)
for pitch_type in PitchType:
assert Chord.from_series(chord_series, measures_df, pitch_type) is None
def test_key_from_series():
def get_relative(global_tonic, global_mode, relative_numeral, pitch_type):
"""Get the relative key tonic of a numeral in a given global key."""
local_interval = hu.get_interval_from_numeral(relative_numeral, global_mode, pitch_type)
local_tonic = hu.transpose_pitch(global_tonic, local_interval, pitch_type)
return local_tonic
def check_equals(key_dict, key, pitch_type):
assert key.tonic_type == pitch_type
# Check mode
if not pd.isnull(key_dict['relativeroot']):
final_root = key_dict['relativeroot'].split('/')[0]
assert (
key.relative_mode == KeyMode.MINOR if final_root[-1].islower() else KeyMode.MAJOR
)
else:
assert key.relative_mode == key.local_mode
assert key.local_mode == KeyMode.MINOR if key_dict['localkey_is_minor'] else KeyMode.MAJOR
# Check tonic
if not pd.isnull(key_dict['relativeroot']):
# We can rely on this non-relative local key. It is checked below
key_tonic = key.local_tonic
key_mode = key.local_mode
for relative_numeral in reversed(key_dict['relativeroot'].split('/')):
key_tonic = get_relative(key_tonic, key_mode, relative_numeral, pitch_type)
key_mode = KeyMode.MINOR if relative_numeral[-1].islower() else KeyMode.MAJOR
assert key_tonic == key.relative_tonic
assert key_mode == key.relative_mode
else:
assert key.relative_tonic == key.local_tonic
assert key.relative_mode == key.local_mode
global_key_tonic = hu.get_pitch_from_string(key_dict['globalkey'], pitch_type)
global_mode = KeyMode.MINOR if key_dict['globalkey_is_minor'] else KeyMode.MAJOR
local_key_tonic = get_relative(
global_key_tonic, global_mode, key_dict['localkey'], pitch_type
)
local_key_mode = KeyMode.MINOR if key_dict['localkey_is_minor'] else KeyMode.MAJOR
assert key.local_tonic == local_key_tonic
assert key.local_mode == local_key_mode
key_dict = {
'globalkey': 'A',
'globalkey_is_minor': False,
'localkey': 'iii',
'localkey_is_minor': True,
'relativeroot': pd.NA,
}
# A few ad-hoc
key_tpc = Key.from_series(pd.Series(key_dict), PitchType.TPC)
key_midi = Key.from_series(pd.Series(key_dict), PitchType.MIDI)
assert key_tpc.local_mode == KeyMode.MINOR == key_midi.local_mode
assert key_tpc.local_tonic == hc.TPC_C + hc.ACCIDENTAL_ADJUSTMENT[PitchType.TPC]
assert key_midi.local_tonic == 1
key_dict['globalkey_is_minor'] = True
key_tpc = Key.from_series(pd.Series(key_dict), PitchType.TPC)
key_midi = Key.from_series(pd.Series(key_dict), PitchType.MIDI)
assert key_tpc.local_mode == KeyMode.MINOR == key_midi.local_mode
assert key_tpc.local_tonic == hc.TPC_C
assert key_midi.local_tonic == 0
key_dict['localkey_is_minor'] = False
key_tpc = Key.from_series( | pd.Series(key_dict) | pandas.Series |
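# --- Small illustration of the lookup pattern used throughout the tests
# above: select the single measures row whose 'mc' matches and squeeze the
# one-row frame into a Series before handing it to get_metrical_level().
import pandas as pd

measures_df = pd.DataFrame({'mc': list(range(10)), 'timesig': '12/8'})
row = measures_df.loc[measures_df['mc'] == 3].squeeze()  # one-row DataFrame -> Series
assert row['timesig'] == '12/8'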
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/feature_testing.ipynb (unless otherwise specified).
__all__ = ['get_tabular_object', 'train_predict', 'SPLIT_PARAMS', 'hist_plot_preds', 'BoldlyWrongTimeseries']
# Cell
from loguru import logger
from fastai.tabular.all import *
from ashrae import loading, preprocessing, inspection
from sklearn import linear_model, tree, model_selection, ensemble
import tqdm
import plotly.express as px
import pandas as pd
import ipywidgets as widgets
# Cell
def get_tabular_object(df:pd.DataFrame, var_names:dict,
splits=None, procs:list=None):
if procs is None: procs = []
return TabularPandas(df.copy(), procs,
var_names['cats'], var_names['conts'],
y_names=var_names['dep_var'],
splits=splits)
SPLIT_PARAMS = dict(
train_frac = .8,
split_kind = 'time_split_day',
)
def train_predict(df:pd.DataFrame, var_names:dict,
model, model_params:dict=None, n_rep:int=3,
n_samples_train:int=10_000,
n_samples_valid:int=10_000,
procs:list=[Categorify, FillMissing, Normalize],
split_params:dict=None):
split_params = SPLIT_PARAMS if split_params is None else split_params
y_col = var_names['dep_var']
score_vals = []
model_params = {} if model_params is None else model_params
to = get_tabular_object(df, var_names, procs=procs)
for i in tqdm.tqdm(range(n_rep), total=n_rep, desc='Repetition'):
m = model(**model_params)
splits = preprocessing.split_dataset(df, **split_params)
mask = to.xs.index.isin(splits[0])
_X = to.xs.loc[~mask, :].iloc[:n_samples_train]
_y = to.ys.loc[~mask, y_col].iloc[:n_samples_train]
print(_X['primary_use'].values[:5])
m.fit(_X.values, _y.values)
_X = to.xs.loc[mask, :].iloc[:n_samples_valid]
_y = to.ys.loc[mask, y_col].iloc[:n_samples_valid]
pred = m.predict(_X.values)
s = torch.sqrt(F.mse_loss(tensor(pred), tensor(_y.values))).item()
score_vals.append({'iter': i, 'rmse loss': s})
return pd.DataFrame(score_vals)
# Cell
def hist_plot_preds(y0:np.ndarray, y1:np.ndarray,
label0:str='y0', label1:str='y1'):
res = pd.concat(
(
pd.DataFrame({
'y': y0,
'set': [label0] * len(y0)
}),
pd.DataFrame({
'y':y1,
'set': [label1] * len(y1)
})
),
ignore_index=True
)
return px.histogram(res, x='y', color='set', marginal='box',
barmode='overlay', histnorm='probability density')
# Cell
class BoldlyWrongTimeseries:
def __init__(self, xs, y_true, y_pred, info:pd.DataFrame=None):
if info is None:
self.df = xs.loc[:,['meter', 'building_id', 'timestamp']].copy()
else:
assert all([v in info.columns.values for v in ['meter', 'building_id', 'timestamp']])
self.df = xs.join(info)
for col in ['meter', 'building_id']:
self.df[col] = self.df[col].astype('category')
self.df[col].cat.set_categories(sorted(self.df[col].unique()),
ordered=True, inplace=True)
self.df['y_true'] = y_true
self.df['y_pred'] = y_pred
self.compute_misses()
def compute_misses(self):
fun = lambda x: np.sqrt(np.mean(x**2))
self.miss = (self.df.assign(difference=lambda x: x['y_pred']-x['y_true'])
.groupby(['building_id', 'meter'])
.agg(loss= | pd.NamedAgg(column='difference', aggfunc=fun) | pandas.NamedAgg |
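# --- Hedged sketch of the groupby + pd.NamedAgg RMSE aggregation used in
# compute_misses() above, run on a tiny synthetic frame (values illustrative).
import numpy as np
import pandas as pd

rmse = lambda x: np.sqrt(np.mean(x**2))
toy = pd.DataFrame({'building_id': [1, 1, 2, 2],
                    'meter': [0, 0, 0, 0],
                    'y_true': [1.0, 2.0, 3.0, 4.0],
                    'y_pred': [1.5, 2.5, 2.0, 5.0]})
miss = (toy.assign(difference=lambda x: x['y_pred'] - x['y_true'])
           .groupby(['building_id', 'meter'])
           .agg(loss=pd.NamedAgg(column='difference', aggfunc=rmse))
           .sort_values('loss', ascending=False))
# building 2 shows the larger loss (RMSE of [-1.0, 1.0] vs [0.5, 0.5]).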
# coding=utf-8
from datetime import datetime
from wit import Wit
from string import Template
from time import sleep
from collections import namedtuple
from pathlib import Path
import pandas as pd
import deepcut
import os
import glob
import pickle
import config
toq_key = config.toq_key
say_key = config.say_key
sub_key = config.sub_key
sec_key = config.sec_key
who_key = config.who_key
now_here = os.getcwd()
def get_file_name(dir_file):
fn = os.path.basename(dir_file)
fn_alone = os.path.splitext(fn)[0]
return fn_alone
# df is the table; 'extend' describes how this table has been changed, and is also the folder name used to store the file
def export_file(old_table, new_table, extend):
file_name = os.path.basename(old_table)
fn_no_extension = os.path.splitext(file_name)[0]
path_here = os.getcwd()
# export the df table
directory = os.path.join(path_here, extend)
if not os.path.exists(directory):
os.makedirs(directory)
export_file_dir = os.path.join(directory, fn_no_extension + '_{!s}.csv'.format(extend))
new_table.to_csv(export_file_dir, sep='\t', encoding='utf-8')
print('Exported file {!s}'.format(fn_no_extension + '_{!s}.csv'.format(extend)))
# Start from the csv produced from the txt exported from LINE,
# then transform it into a table of message time (time), sender name (name) and message text (text)
def clean_table(file_path):
# chat is the table read from the csv file that we are going to clean
chat = pd.read_csv(file_path)
# chat_mod is the same table as chat, but with the columns renamed
chat_mod = pd.DataFrame({'time': chat.ix[:, 0], 'name': chat.ix[:, 1], 'text': chat.ix[:, 2]})
# If the input, with its first five characters removed, parses as a date, only that date is returned;
# anything that does not match this condition is left untouched and returned as-is
def validate(date_text):
try:
datetime.strptime(date_text[5:], '%d/%m/%Y')
b = date_text[5:]
return b
except ValueError:
return date_text
# check whether the input is in '%H:%M' format
def tm(t):
try:
datetime.strptime(t, '%H:%M')
return True
except ValueError:
return False
# check whether the input is in '%d/%m/%Y' format
def date(d):
try:
datetime.strptime(d, '%d/%m/%Y')
return True
except ValueError:
return False
# For entries in the time column that include a day name, strip the day name; leave other entries unchanged, then collect everything into a list
na = []
for vela in chat_mod['time']:
k = validate(str(vela))
na.append(k)
# walk through the items in the list na
for s in na:
# if the item is in '%H:%M' format
if tm(s):
# if the item in na just before s is in '%d/%m/%Y' format
if date(na[na.index(s) - 1]):
# replace the item at position s with the previous item, followed by a space, followed by s as before
na[na.index(s)] = na[na.index(s) - 1] + " " + s
# if the item in na just before s, with its last 6 characters removed, is in '%d/%m/%Y' format
elif date(na[na.index(s) - 1][:-6]):
# replace the item at position s with the previous item (its last 6 characters removed),
# followed by a space, followed by s as before
na[na.index(s)] = na[na.index(s) - 1][:-6] + " " + s
# any other format: do nothing
else:
pass
# when done, na holds items in the %d/%m/%Y %H:%M format
# time_mod is a column with the date placed before the time, in %d/%m/%Y %H:%M format
chat_mod['time_mod'] = pd.Series(na)
# fd is a table with these 3 columns
fd = chat_mod[['time_mod', 'name', 'text']]
# dfd is the table with rows dropped where the text column is empty
dfd = fd.dropna(subset=['text'])
# these lists come from the individual columns of dfd
a1 = dfd['time_mod'].tolist()
a2 = dfd['name'].tolist()
a3 = dfd['text'].tolist()
# build a new table named df from a1, a2 and a3
df = | pd.DataFrame({'time': a1, 'name': a2, 'text': a3}) | pandas.DataFrame |
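# --- Minimal illustration of the strptime-based validation used by
# validate()/tm()/date() above: parsing succeeds only for well-formed values.
from datetime import datetime

def is_format(value, fmt):
    try:
        datetime.strptime(value, fmt)
        return True
    except ValueError:
        return False

assert is_format('25/12/2017', '%d/%m/%Y')
assert is_format('13:45', '%H:%M')
assert not is_format('13:45', '%d/%m/%Y')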
"""
PIData contains a number of auxiliary classes that define common functionality
among :class:`PIPoint` and :class:`PIAFAttribute` objects.
"""
# pragma pylint: disable=unused-import
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import (
ascii,
bytes,
chr,
dict,
filter,
hex,
input,
int,
list,
map,
next,
object,
oct,
open,
pow,
range,
round,
str,
super,
zip,
)
from datetime import datetime
# pragma pylint: enable=unused-import
try:
from abc import ABC, abstractmethod
except ImportError:
from abc import ABCMeta, abstractmethod
from __builtin__ import str as BuiltinStr
ABC = ABCMeta(BuiltinStr("ABC"), (object,), {"__slots__": ()})
from pandas import DataFrame, Series
from PIconnect.AFSDK import AF
from PIconnect.PIConsts import (
BufferMode,
CalculationBasis,
ExpressionSampleType,
RetrievalMode,
SummaryType,
TimestampCalculation,
UpdateMode,
get_enumerated_value,
)
from PIconnect.time import timestamp_to_index, to_af_time_range, to_af_time
class PISeries(Series):
"""PISeries
Create a timeseries, derived from :class:`pandas.Series`
Args:
tag (str): Name of the new series
timestamp (List[datetime]): List of datetime objects to
create the new index
value (List): List of values for the timeseries, should be equally long
as the `timestamp` argument
uom (str, optional): Defaults to None. Unit of measurement for the
series
.. todo::
Remove class, return to either plain :class:`pandas.Series` or a
composition where the Series is just an attribute
"""
version = "0.1.0"
def __init__(self, tag, timestamp, value, uom=None, *args, **kwargs):
Series.__init__(self, data=value, index=timestamp, name=tag, *args, **kwargs)
self.tag = tag
self.uom = uom
class PISeriesContainer(ABC):
"""PISeriesContainer
General base class (ABC) for objects that return :class:`PISeries` objects,
capturing the behaviour shared by PI Points and PI AF Attributes.
.. todo::
Move `__boundary_types` to PIConsts as a new enumeration
"""
version = "0.1.0"
__boundary_types = {
"inside": AF.Data.AFBoundaryType.Inside,
"outside": AF.Data.AFBoundaryType.Outside,
"interpolate": AF.Data.AFBoundaryType.Interpolated,
}
def __init__(self):
pass
@abstractmethod
def _recorded_values(self, time_range, boundary_type, filter_expression):
"""Abstract implementation for recorded values
The internals for retrieving recorded values from PI and PI-AF are
different and should therefore be implemented by the respective data
containers.
"""
pass
@abstractmethod
def _interpolated_values(self, time_range, interval, filter_expression):
pass
@abstractmethod
def _interpolated_value(self, time):
pass
@abstractmethod
def _recorded_value(self, time, retrieval_mode):
pass
@abstractmethod
def _summary(self, time_range, summary_types, calculation_basis, time_type):
pass
@abstractmethod
def _summaries(
self, time_range, interval, summary_types, calculation_basis, time_type
):
pass
@abstractmethod
def _filtered_summaries(
self,
time_range,
interval,
filter_expression,
summary_types,
calculation_basis,
filter_evaluation,
filter_interval,
time_type,
):
pass
@abstractmethod
def _current_value(self):
pass
@abstractmethod
def _update_value(self, value, update_mode, buffer_mode):
pass
@abstractmethod
def name(self):
pass
@abstractmethod
def units_of_measurement(self):
pass
@property
def current_value(self):
"""current_value
Return the current value of the attribute."""
return self._current_value()
def interpolated_value(self, time):
"""interpolated_value
Return a PISeries with an interpolated value at the given time
Args:
time (str): String containing the date, and possibly time,
for which to retrieve the value. This is parsed, using
:afsdk:`AF.Time.AFTime <M_OSIsoft_AF_Time_AFTime__ctor_7.htm>`.
Returns:
PISeries: A PISeries with a single row, with the corresponding time as
the index
"""
time = to_af_time(time)
pivalue = self._interpolated_value(time)
return PISeries(
tag=self.name,
value=pivalue.Value,
timestamp=[timestamp_to_index(pivalue.Timestamp.UtcTime)],
uom=self.units_of_measurement,
)
def recorded_value(self, time, retrieval_mode=RetrievalMode.AUTO):
"""recorded_value
Return a PISeries with the recorded value at or close to the given time
Args:
time (str): String containing the date, and possibly time,
for which to retrieve the value. This is parsed, using
:afsdk:`AF.Time.AFTime <M_OSIsoft_AF_Time_AFTime__ctor_7.htm>`.
retrieval_mode (int or :any:`PIConsts.RetrievalMode`): Flag determining
which value to return if no value available at the exact requested
time.
Returns:
PISeries: A PISeries with a single row, with the corresponding time as
the index
"""
time = to_af_time(time)
pivalue = self._recorded_value(time, retrieval_mode)
return PISeries(
tag=self.name,
value=pivalue.Value,
timestamp=[timestamp_to_index(pivalue.Timestamp.UtcTime)],
uom=self.units_of_measurement,
)
def update_value(
self,
value,
time=None,
update_mode=UpdateMode.NO_REPLACE,
buffer_mode=BufferMode.BUFFER_IF_POSSIBLE,
):
"""Update value for existing PI object.
Args:
value: value type should be in cohesion with PI object or
it will raise PIException: [-10702] STATE Not Found
time (datetime, optional): it is not possible to set future value,
it raises PIException: [-11046] Target Date in Future.
You can combine update_mode and time to change already stored value.
"""
if time:
time = to_af_time(time)
value = AF.Asset.AFValue(value, time)
return self._update_value(value, int(update_mode), int(buffer_mode))
def recorded_values(
self, start_time, end_time, boundary_type="inside", filter_expression=""
):
"""recorded_values
Return a PISeries of recorded data.
Data is returned between the given *start_time* and *end_time*,
inclusion of the boundaries is determined by the *boundary_type*
attribute. Both *start_time* and *end_time* are parsed by AF.Time and
allow for time specification relative to "now" by use of the asterisk.
By default the *boundary_type* is set to 'inside', which returns from
the first value after *start_time* to the last value before *end_time*.
The other options are 'outside', which returns from the last value
before *start_time* to the first value before *end_time*, and
'interpolate', which interpolates the first value to the given
*start_time* and the last value to the given *end_time*.
*filter_expression* is an optional string to filter the returned
values, see OSIsoft PI documentation for more information.
The AF SDK allows for inclusion of filtered data, with filtered values
marked as such. At this point PIconnect does not support this and
filtered values are always left out entirely.
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
boundary_type (str, optional): Defaults to 'inside'. Key from the
`__boundary_types` dictionary to describe how to handle the
boundaries of the time range.
filter_expression (str, optional): Defaults to ''. Query on which
data to include in the results. See :ref:`filtering_values`
for more information on filter queries.
Returns:
PISeries: Timeseries of the values returned by the SDK
Raises:
ValueError: If the provided `boundary_type` is not a valid key a
`ValueError` is raised.
"""
time_range = to_af_time_range(start_time, end_time)
boundary_type = self.__boundary_types.get(boundary_type.lower())
filter_expression = self._normalize_filter_expression(filter_expression)
if boundary_type is None:
raise ValueError(
"Argument boundary_type must be one of "
+ ", ".join('"%s"' % x for x in sorted(self.__boundary_types.keys()))
)
pivalues = self._recorded_values(time_range, boundary_type, filter_expression)
timestamps, values = [], []
for value in pivalues:
timestamps.append(timestamp_to_index(value.Timestamp.UtcTime))
values.append(value.Value)
return PISeries(
tag=self.name,
timestamp=timestamps,
value=values,
uom=self.units_of_measurement,
)
def interpolated_values(self, start_time, end_time, interval, filter_expression=""):
"""interpolated_values
Return a PISeries of interpolated data.
Data is returned between *start_time* and *end_time* at a fixed
*interval*. All three values are parsed by AF.Time and the first two
allow for time specification relative to "now" by use of the
asterisk.
*filter_expression* is an optional string to filter the returned
values, see OSIsoft PI documentation for more information.
The AF SDK allows for inclusion of filtered data, with filtered
values marked as such. At this point PIconnect does not support this
and filtered values are always left out entirely.
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
interval (str): String containing the interval at which to extract
data. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
filter_expression (str, optional): Defaults to ''. Query on which
data to include in the results. See :ref:`filtering_values`
for more information on filter queries.
Returns:
PISeries: Timeseries of the values returned by the SDK
"""
time_range = to_af_time_range(start_time, end_time)
interval = AF.Time.AFTimeSpan.Parse(interval)
filter_expression = self._normalize_filter_expression(filter_expression)
pivalues = self._interpolated_values(time_range, interval, filter_expression)
timestamps, values = [], []
for value in pivalues:
timestamps.append(timestamp_to_index(value.Timestamp.UtcTime))
values.append(value.Value)
return PISeries(
tag=self.name,
timestamp=timestamps,
value=values,
uom=self.units_of_measurement,
)
def summary(
self,
start_time,
end_time,
summary_types,
calculation_basis=CalculationBasis.TIME_WEIGHTED,
time_type=TimestampCalculation.AUTO,
):
"""summary
Return one or more summary values over a single time range.
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using :afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using :afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
summary_types (int or PIConsts.SummaryType): Type(s) of summaries
of the data within the requested time range.
calculation_basis (int or PIConsts.CalculationBasis, optional):
Event weighting within an interval. See :ref:`event_weighting`
and :any:`CalculationBasis` for more information. Defaults to
CalculationBasis.TIME_WEIGHTED.
time_type (int or PIConsts.TimestampCalculation, optional):
Timestamp to return for each of the requested summaries. See
:ref:`summary_timestamps` and :any:`TimestampCalculation` for
more information. Defaults to TimestampCalculation.AUTO.
Returns:
pandas.DataFrame: Dataframe with the unique timestamps as row index
and the summary name as column name.
"""
time_range = to_af_time_range(start_time, end_time)
summary_types = int(summary_types)
calculation_basis = int(calculation_basis)
time_type = int(time_type)
pivalues = self._summary(
time_range, summary_types, calculation_basis, time_type
)
df = DataFrame()
for summary in pivalues:
key = SummaryType(summary.Key).name
value = summary.Value
timestamp = timestamp_to_index(value.Timestamp.UtcTime)
value = value.Value
df = df.join(DataFrame(data={key: value}, index=[timestamp]), how="outer")
return df
def summaries(
self,
start_time,
end_time,
interval,
summary_types,
calculation_basis=CalculationBasis.TIME_WEIGHTED,
time_type=TimestampCalculation.AUTO,
):
"""summaries
Return one or more summary values for each interval within a time range
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
interval (str): String containing the interval at which to extract
data. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
summary_types (int or PIConsts.SummaryType): Type(s) of summaries
of the data within the requested time range.
calculation_basis (int or PIConsts.CalculationBasis, optional):
Event weighting within an interval. See :ref:`event_weighting`
and :any:`CalculationBasis` for more information. Defaults to
CalculationBasis.TIME_WEIGHTED.
time_type (int or PIConsts.TimestampCalculation, optional):
Timestamp to return for each of the requested summaries. See
:ref:`summary_timestamps` and :any:`TimestampCalculation` for
more information. Defaults to TimestampCalculation.AUTO.
Returns:
pandas.DataFrame: Dataframe with the unique timestamps as row index
and the summary name as column name.
"""
time_range = to_af_time_range(start_time, end_time)
interval = AF.Time.AFTimeSpan.Parse(interval)
summary_types = int(summary_types)
calculation_basis = int(calculation_basis)
time_type = int(time_type)
pivalues = self._summaries(
time_range, interval, summary_types, calculation_basis, time_type
)
df = DataFrame()
for summary in pivalues:
key = SummaryType(summary.Key).name
timestamps, values = zip(
*[
(timestamp_to_index(value.Timestamp.UtcTime), value.Value)
for value in summary.Value
]
)
df = df.join(DataFrame(data={key: values}, index=timestamps), how="outer")
return df
def filtered_summaries(
self,
start_time,
end_time,
interval,
filter_expression,
summary_types,
calculation_basis=None,
filter_evaluation=None,
filter_interval=None,
time_type=None,
):
"""filtered_summaries
Return one or more summary values for each interval within a time range
Args:
start_time (str or datetime): String containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): String containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
interval (str): String containing the interval at which to extract
data. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
filter_expression (str, optional): Defaults to ''. Query on which
data to include in the results. See :ref:`filtering_values`
for more information on filter queries.
summary_types (int or PIConsts.SummaryType): Type(s) of summaries
of the data within the requested time range.
calculation_basis (int or PIConsts.CalculationBasis, optional):
Event weighting within an interval. See :ref:`event_weighting`
and :any:`CalculationBasis` for more information. Defaults to
CalculationBasis.TIME_WEIGHTED.
filter_evaluation (int or PIConsts,ExpressionSampleType, optional):
Determines whether the filter is applied to the raw events in
the database, of if it is applied to an interpolated series
with a regular interval. Defaults to
ExpressionSampleType.EXPRESSION_RECORDED_VALUES.
filter_interval (str, optional): String containing the interval at
which to extract apply the filter. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
time_type (int or PIConsts.TimestampCalculation, optional):
Timestamp to return for each of the requested summaries. See
:ref:`summary_timestamps` and :any:`TimestampCalculation` for
more information. Defaults to TimestampCalculation.AUTO.
Returns:
pandas.DataFrame: Dataframe with the unique timestamps as row index
and the summary name as column name.
"""
time_range = to_af_time_range(start_time, end_time)
interval = AF.Time.AFTimeSpan.Parse(interval)
filter_expression = self._normalize_filter_expression(filter_expression)
calculation_basis = get_enumerated_value(
enumeration=CalculationBasis,
value=calculation_basis,
default=CalculationBasis.TIME_WEIGHTED,
)
filter_evaluation = get_enumerated_value(
enumeration=ExpressionSampleType,
value=filter_evaluation,
default=ExpressionSampleType.EXPRESSION_RECORDED_VALUES,
)
time_type = get_enumerated_value(
enumeration=TimestampCalculation,
value=time_type,
default=TimestampCalculation.AUTO,
)
filter_interval = AF.Time.AFTimeSpan.Parse(filter_interval)
pivalues = self._filtered_summaries(
time_range,
interval,
filter_expression,
summary_types,
calculation_basis,
filter_evaluation,
filter_interval,
time_type,
)
df = | DataFrame() | pandas.DataFrame |
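# --- Hedged usage sketch for the PISeriesContainer API documented above, as
# it is typically exercised through a PIPoint. The tag name is a placeholder
# and this assumes a reachable PI server plus the usual PIconnect entry
# points (PIconnect.PIServer, PIServer.search); adjust to the local setup.
import PIconnect as PI
from PIconnect.PIConsts import SummaryType

with PI.PIServer() as server:
    point = server.search('sinusoid')[0]           # any existing tag
    raw = point.recorded_values('*-2d', '*')       # PISeries of raw events
    hourly = point.summaries('*-2d', '*', '1h',
                             SummaryType.AVERAGE | SummaryType.MAXIMUM)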
import pandas as pd
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
def round_datetime_to_minute(dt):
dt = dt - datetime.timedelta(seconds=dt.second, microseconds=dt.microsecond)
return dt
class Occurrences:
def __init__(self):
self.update_graph = (lambda: print('should not enter'))
now_datetime = datetime.datetime.now()
now_floored = round_datetime_to_minute(now_datetime)
self.curr_occ_list = | pd.DataFrame({'Datetime': [now_floored], 'Cnt': [0]}) | pandas.DataFrame |
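# Quick check of round_datetime_to_minute() defined above: seconds and
# microseconds are dropped, flooring the timestamp to the minute.
example = datetime.datetime(2021, 3, 1, 12, 34, 56, 789000)
assert round_datetime_to_minute(example) == datetime.datetime(2021, 3, 1, 12, 34)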
import streamlit as st
import os
import pandas as pd
import numpy as np
import datetime
import plotly.express as px
import plotly as plty
import seaborn as sns
import country_converter as coco
from bokeh.io import output_file, show, output_notebook, save
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Viridis as palette
from bokeh.transform import factor_cmap
import warnings
warnings.filterwarnings("ignore")
# ======================================================================== Load and Cache the data
# @st.cache(persist=True)
def getdata():
WORLD_CONFIRMED_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
WORLD_DEATHS_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
WORLD_RECOVERED_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
world_confirmed = pd.read_csv(WORLD_CONFIRMED_URL)
world_deaths = pd.read_csv(WORLD_DEATHS_URL)
world_recovered = pd.read_csv(WORLD_RECOVERED_URL)
sets = [world_confirmed, world_deaths, world_recovered]
# yesterday's date
yesterday = pd.to_datetime(world_confirmed.columns[-1]).date()
today_date = str(pd.to_datetime(yesterday).date() + datetime.timedelta(days=1))
return (yesterday, today_date, sets)
yesterday, today_date, sets = getdata()[0], getdata()[1], getdata()[2]
# ========================================================================================= clean
def drop_neg(df):
# Drop negative entries entries
idx_l = df[df.iloc[:, -1] < 0].index.tolist()
for i in idx_l:
df.drop([i], inplace=True)
return df.reset_index(drop=True)
sets = [drop_neg(i) for i in sets]
for i in range(3):
sets[i].rename(columns={'Country/Region': 'Country', 'Province/State': 'State'}, inplace=True)
sets[i][['State']] = sets[i][['State']].fillna('')
sets[i].fillna(0, inplace=True)
# Change dates to datetime format
sets[i].columns = sets[i].columns[:4].tolist() + [pd.to_datetime(sets[i].columns[j]).date()
for j in range(4, len(sets[i].columns))]
sets_grouped = []
cases = ['confirmed cases', 'deaths', 'recovered cases']
for i in range(3):
o = sets[i].groupby('Country').sum()
o.rename(index={'US': 'United States'}, inplace=True)
sets_grouped.append(o)
# get continent names
for df in sets_grouped:
continent = coco.convert(names=df.index.tolist(), to='Continent')
df['Continent'] = continent
# ========================================================================================= top countries
def bokehB(dataF, case):
# Bokeh bar plots. The function takes a dataframe, dataF, like the one provided by the raw data
# (dates as columns, countries as rows). It first takes the last column as yesterday's date.
df = dataF.iloc[:, -2:].sort_values(by=dataF.columns[-2], ascending=False).head(20)
df['totals'] = df.iloc[:, 0]
df.drop(df.columns[0], axis=1, inplace=True)
cont_cat = len(df['Continent'].unique())
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
source = ColumnDataSource(df)
select_tools = ['save']
tooltips = [
('Country', '@Country'), ('Totals', '@totals{0,000}')
]
p = figure(x_range=df.index.tolist(), plot_width=840, plot_height=600,
x_axis_label='Country',
y_axis_label='Totals',
title="Top Countries with {} as of ".format(case) + today_date,
tools=select_tools)
p.vbar(x='Country', top='totals', width=0.9, alpha=0.7, source=source,
legend_field="Continent",
color=factor_cmap('Continent', palette=palette[cont_cat], factors=df.Continent.unique()))
p.xgrid.grid_line_color = None
p.y_range.start = 0
p.xaxis.major_label_orientation = 1
p.left[0].formatter.use_scientific = False
p.add_tools(HoverTool(tooltips=tooltips))
return p
def bokehB_mort(num=100):
# Bokeh bar plots. The function already includes the confirmed and deaths dataframes,
# and operates over them to calculate the mortality rate depending on num (number of
# minimum deaths to consider for a country). The rest is equivalent to the BokehB()
# function.
# top countries by deaths rate with at least num deaths
top_death = sets_grouped[1][yesterday].sort_values(ascending=False)
top_death = top_death[top_death > num]
# Inner join to the confirmed set, compute mortality rate and take top 20
df_mort = | pd.concat([sets_grouped[0][yesterday], top_death], axis=1, join='inner') | pandas.concat |
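# --- Hedged sketch of the mortality-rate calculation that bokehB_mort()
# builds towards: keep countries above a minimum death count, inner-join to
# the confirmed totals and compute rate = deaths / confirmed (synthetic data).
import pandas as pd

confirmed = pd.Series({'A': 1000, 'B': 5000, 'C': 200}, name='confirmed')
deaths = pd.Series({'A': 50, 'B': 400, 'C': 2}, name='deaths')
top_deaths = deaths[deaths > 100]                      # minimum-death filter
mort = (pd.concat([confirmed, top_deaths], axis=1, join='inner')
          .assign(rate=lambda d: 100 * d['deaths'] / d['confirmed'])
          .sort_values('rate', ascending=False))
# -> only country 'B' remains, with a rate of 8.0 (%).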
#!/home/twixtrom/miniconda3/envs/analogue/bin/python
##############################################################################################
# run_wrf.py - Code for calculating the best member over a date range
#
#
# by <NAME>
# Texas Tech University
# 22 January 2019
#
##############################################################################################
import subprocess
import sys
import os
import glob
import operator
import warnings
from pathlib import Path
from analogue_algorithm import (check_logs, concat_files, create_wrf_namelist,
find_max_coverage, find_analogue,
find_analogue_precip_area, increment_time, rmse_dask)
import numpy as np
from netCDF4 import num2date
import pandas as pd
import xarray as xr
from dask import compute
from dask.diagnostics import ProgressBar
from scipy.ndimage import gaussian_filter
warnings.filterwarnings("ignore")
# load_modules = subprocess.call('module load intel impi netcdf-serial', shell=True)
# ndays = float(os.environ['SGE_TASK_ID']) - 1
ndays = sys.argv[1]
# datestr = sys.argv[1]
# ndays = 0
# Define initial period start date
start_date = pd.Timestamp(2016, 1, 2, 12)
# chunks_forecast = {'time': 1, 'pressure': 1}
# chunks_dataset = {'time': 1}
chunks_forecast = None
chunks_dataset = None
mem_list = ['mem'+str(i) for i in range(1, 21)]
mp_list = ['mem'+str(i) for i in range(1, 11)]
pbl_list = ['mem1', *['mem'+str(i) for i in range(11, 21)]]
analogue_param = {
'sigma': 1.,
'pcp_threshold': 10.,
'sum_threshold': 50.,
'pcp_operator': operator.ge,
'logfile': '/home/twixtrom/adaptive_WRF/adaptive_WRF/an_selection_log_201601_thomp_retro.log',
'cape_threshold': 1000.,
'cape_operator': operator.ge,
'height_500hPa_threshold': 5700.,
'height_500hPa_operator': operator.le,
'start_date': '2015-01-01T12:00:00',
'dataset_dir': '/lustre/scratch/twixtrom/dataset_variables/temp/',
'mp_an_method': 'Different Points - pcpT00+hgt500f00+capeT-3',
'pbl_an_method': 'Same Points - pcpT00+hgt500T00',
'dt': '1D'}
# Define ensemble physics options
model_phys = {
'mem1': (8, 1, 1),
'mem2': (3, 1, 1),
'mem3': (6, 1, 1),
'mem4': (16, 1, 1),
'mem5': (18, 1, 1),
'mem6': (19, 1, 1),
'mem7': (10, 1, 1),
'mem8': (1, 1, 1),
'mem9': (5, 1, 1),
'mem10': (9, 1, 1),
'mem11': (8, 2, 2),
'mem12': (8, 5, 1),
'mem13': (8, 6, 1),
'mem14': (8, 7, 1),
'mem15': (8, 12, 1),
'mem16': (8, 4, 4),
'mem17': (8, 8, 1),
'mem18': (8, 9, 1),
'mem19': (8, 10, 10),
'mem20': (8, 99, 1)
}
# Define model configuration parameters
wrf_param = {
'dir_control': '/lustre/scratch/twixtrom/adaptive_wrf_post/control_thompson',
'dir_dataset': '/lustre/scratch/twixtrom/dataset_variables/temp/',
'rootdir': '/home/twixtrom/adaptive_WRF/',
'scriptsdir': '/home/twixtrom/adaptive_WRF/adaptive_WRF/',
'dir_run': '/lustre/scratch/twixtrom/adaptive_wrf_run/adaptive_run/',
'dir_compressed_gfs': '/lustre/scratch/twixtrom/gfs_compress_201601/',
'check_log': 'check_log_adaptive.log',
# Domain-Specific Parameters
'norm_cores': 36,
'model_Nx1': 508, # number of grid points in x-direction
'model_Ny1': 328, # number of grid points in y-direction
'model_Nz': 38, # number of grid points in vertical
'model_ptop': 5000, # Pressure at model top
'model_gridspx1': 12000, # gridspacing in x (in meters)
'model_gridspy1': 12000, # gridspacing in y
'dt': 36, # model time step (in sec)
'model_centlat': 38.0, # center latitude of domain
'model_centlon': -103.0, # center longitude of domain
'model_stdlat1': 30.0, # first true latitude of domain
'model_stdlat2': 60.0, # second true latitude of domain
'model_stdlon': -101.0, # standard longitude
'dlbc': 360, # number of minutes in between global model BCs
'output_interval': 180, # Frequency of model output to file mother domain
'output_intervalNEST': 60, # Frequency of model output to file - Nest
'model_num_in_output': 10000, # Output times per file
'fct_len': 2880, # Minutes to forecast for
'feedback': 0, # 1-way(0) or 2-way nest(1)
'enum': 0, # Number of physics runs
'siz1': 29538340980, # File size dom 1
'siz2': 15445197060, # File size dom 2
# Nested domains info
'model_gridspx1_nest': 4000,
'model_gridspy1_nest': 4000,
'iparent_st_nest': 200,
'jparent_st_nest': 80,
'model_Nx1_nest': 322,
'model_Ny1_nest': 271,
'parent_id_nest': 1,
'grid_ratio_nest': 3,
# Locations of important directories
'dir_wps': '/lustre/work/twixtrom/WPSV3.5.1/',
'dir_wrf': '/lustre/work/twixtrom/WRFV3.5.1/run/',
'dir_sub': '/home/twixtrom/adaptive_WRF/adaptive_WRF/',
'dir_store': '/lustre/scratch/twixtrom/adaptive_wrf_save/adaptive_wrf_thomp_retro/',
'dir_scratch': '/lustre/scratch/twixtrom/',
'dir_gfs': '/lustre/scratch/twixtrom/gfs_data/',
# Parameters for the model (not changed very often)
'model_mp_phys': 8, # microphysics scheme
'model_spec_zone': 1, # number of grid points with tendencies
'model_relax_zone': 4, # number of blended grid points
'dodfi': 0, # Do Dfi 3-yes 0-no
'model_lw_phys': 1, # model long wave scheme
'model_sw_phys': 1, # model short wave scheme
'model_radt': 30, # radiation time step (in minutes)
'model_sfclay_phys': 1, # surface layer physics
'model_surf_phys': 2, # land surface model
'model_pbl_phys': 1, # pbl physics
'model_bldt': 0, # boundary layer time steps (0 : each time steps, in min)
'model_cu_phys': 6, # cumulus param
'model_cu_phys_nest': 0, # cumulus param 3km
'model_cudt': 5, # cumuls time step
'model_use_surf_flux': 1, # 1 is yes
'model_use_snow': 0,
'model_use_cloud': 1,
'model_soil_layers': 4,
'model_w_damping': 1,
'model_diff_opt': 1,
'model_km_opt': 4,
'model_dampcoef': 0.2,
'model_tbase': 300.,
'model_nwp_diagnostics': 1,
'model_do_radar_ref': 1,
'dampopt': 3,
'zdamp': 5000.}
# Calculated terms
wrf_param['fct_len_hrs'] = wrf_param['fct_len'] / 60.
wrf_param['dlbc_hrs'] = wrf_param['dlbc'] / 60.
wrf_param['assim_bzw'] = wrf_param['model_spec_zone'] + wrf_param['model_relax_zone']
wrf_param['otime'] = wrf_param['output_interval'] / 60.
wrf_param['otime_nest'] = wrf_param['output_intervalNEST'] / 60.
wrf_param['model_BC_interval'] = wrf_param['dlbc'] * 60.
# Clear log if this is the first run
# if ndays == 0:
# os.remove(analogue_param['logfile'])
# Find date and time of model start and end
model_initial_date = increment_time(start_date, days=int(ndays))
# model_initial_date = pd.Timestamp(datestr)
model_end_date = increment_time(model_initial_date, hours=wrf_param['fct_len_hrs'])
datep = increment_time(model_initial_date, hours=-1)
print('Starting forecast for: ' + str(model_initial_date), flush=True)
# Determine number of input metgrid levels
# GFS changed from 27 to 32 on May 15, 2016
if model_initial_date < | pd.to_datetime('2016-05-11T12:00:00') | pandas.to_datetime |
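# --- Hedged stand-in for increment_time() (imported from analogue_algorithm
# above), inferred from how it is called here: shift a pandas Timestamp by
# whole days/hours. Illustrative only, not the library implementation.
import pandas as pd

def increment_time_sketch(timestamp, days=0, hours=0):
    return timestamp + pd.Timedelta(days=days, hours=hours)

init = increment_time_sketch(pd.Timestamp(2016, 1, 2, 12), days=3)   # forecast start
end = increment_time_sketch(init, hours=48)                          # +48 h forecast end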
# coding: utf-8
# Create input features for the boosted decision tree model.
import os
import sys
import math
import datetime
import pandas as pd
from sklearn.pipeline import Pipeline
from common.features.lag import LagFeaturizer
from common.features.rolling_window import RollingWindowFeaturizer
from common.features.stats import PopularityFeaturizer
from common.features.temporal import TemporalFeaturizer
# Append TSPerf path to sys.path
tsperf_dir = os.getcwd()
if tsperf_dir not in sys.path:
sys.path.append(tsperf_dir)
# Import TSPerf components
from utils import df_from_cartesian_product
import retail_sales.OrangeJuice_Pt_3Weeks_Weekly.common.benchmark_settings as bs
pd.set_option("display.max_columns", None)
def oj_preprocess(df, aux_df, week_list, store_list, brand_list, train_df=None):
df["move"] = df["logmove"].apply(lambda x: round(math.exp(x)))
df = df[["store", "brand", "week", "move"]].copy()
# Create a dataframe to hold all necessary data
d = {"store": store_list, "brand": brand_list, "week": week_list}
data_grid = df_from_cartesian_product(d)
data_filled = pd.merge(data_grid, df, how="left", on=["store", "brand", "week"])
# Get future price, deal, and advertisement info
data_filled = | pd.merge(data_filled, aux_df, how="left", on=["store", "brand", "week"]) | pandas.merge |
#!/usr/bin/python3
import sys
import pandas as pd
import numpy as np
import os
import concurrent.futures
import functools, itertools
import sofa_time
import statistics
import multiprocessing as mp
import socket
import ipaddress
# sys.path.insert(0, '/home/st9540808/Desktop/sofa/bin')
import sofa_models, sofa_preprocess
import sofa_config
import sofa_print
colors_send = ['#14f2e0', '#41c8e5', '#6e9eeb']
colors_recv = ['#9a75f0', '#c74bf6', '#f320fa', '#fe2bcc']
color_send = itertools.cycle(colors_send)
color_recv = itertools.cycle(colors_recv)
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit", # 13
"msg_id"] # 14
# @profile
def extract_individual_rosmsg(df_send_, df_recv_, *df_others_):
""" Return a dictionary with topic name as key and
a list of ROS messages as value.
Structure of return value: {topic_name: {(guid, seqnum): log}}
where (guid, seqnum) is a msg_id
"""
# Convert timestamp to unix time
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, *df_others):
# df['ts'] = df['ts'] + unix_time_off
df_send_[1]['ts'] = df_send_[1]['ts'] + df_send_[0].cpu_time_offset + df_send_[0].unix_time_off
df_recv_[1]['ts'] = df_recv_[1]['ts'] + df_recv_[0].cpu_time_offset + df_recv_[0].unix_time_off
df_others = []
for cfg_to_pass, df_other in df_others_:
df_other['ts'] = df_other['ts'] + cfg_to_pass.cpu_time_offset + cfg_to_pass.unix_time_off
df_others.append(df_other)
df_send = df_send_[1]
df_recv = df_recv_[1]
# sort by timestamp
df_send = df_send.sort_values(by=['ts'], ignore_index=True)
df_recv = df_recv.sort_values(by=['ts'], ignore_index=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# other logs (assume there's no happen-before relations that needed to be resolved)
# every dataframe is a dictionary in `other_log_list`
gb_others = [df_other.groupby('guid') for df_other in df_others]
other_log_list = [{guid:log for guid, log in gb_other} for gb_other in gb_others]
# find guids that are in both subscription and publisher logs
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid]
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
print(pubaddr)
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
# print(add_data_calls)
all_RTPSMsg_idx = ((df_send['func'] == '~RTPSMessageGroup') & (df_send['publisher'] == pubaddr))
all_RTPSMsgret_idx = ((df_send['func'] == '~RTPSMessageGroup exit') & (df_send['publisher'] == pubaddr))
all_sendSync_idx = ((df_send['func'] == 'sendSync') & (df_send['publisher'] == pubaddr))
all_nn_xpack_idx = (df['func'] == 'nn_xpack_send1')
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
ts = add_data_call['ts']
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# For grouping RTPSMessageGroup function
try:
ts_gt = (df_send['ts'] > ts) # ts greater than that of add_data_call
RTPSMsg_idx = df_send.loc[ts_gt & all_RTPSMsg_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsg_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
RTPSMsgret_idx = df_send.loc[ts_gt & all_RTPSMsgret_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsgret_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
sendSync_idx = df_send.loc[ts_gt & (df_send['ts'] < df_send.loc[RTPSMsgret_idx, 'ts']) & all_sendSync_idx]
sendSync = sendSync_idx.copy()
sendSync['seqnum'] = add_data_call.loc['seqnum']
modified_rows.extend(row for _, row in sendSync.iterrows())
except ValueError as e:
pass
if 'rmw_cyclonedds_cpp' in df['implementation'].values:
try:
df_cls = other_log_list[0][guid]
seqnum = add_data_call.loc['seqnum']
max_ts = df_cls[(df_cls['layer'] == 'cls_egress') & (df_cls['seqnum'] == seqnum)]['ts'].max()
index = df.loc[(ts < df['ts']) & (df['ts'] < max_ts) & all_nn_xpack_idx].index
df_send_partial.loc[index, 'seqnum'] = seqnum
except ValueError as e:
pass
df_send_partial = pd.concat([df_send_partial, | pd.DataFrame(modified_rows) | pandas.DataFrame |
"""Classes that represent production profiles"""
import numpy as np
import pandas as pd
from palantir.facilities import OilWell
from scipy import optimize
# Initial estimate of Di for scipy.optimize.newton
OIL_WELL_INITIAL_DI = 0.000880626223092
GAS_WELL_INITIAL_DI = 0.000880626223092 # TODO check this
def _decline(di, t, qoi, b):
"""Arp's equation for general decline in a well
- qoi: initial rate of production
- di: initial decline rate
- b: curvature (b=0 exponential)
"""
return qoi / ((1 + b * di * t) ** (1 / b))
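# Illustrative example (hypothetical values): with qoi = 1000 bbl/day, di = 0.001 /day
# and b = 0.5, the rate after t = 365 days is
# 1000 / (1 + 0.5 * 0.001 * 365) ** (1 / 0.5) = 1000 / 1.1825 ** 2 ≈ 715 bbl/day.
# As b approaches 0 the hyperbolic form reduces to exponential decline,
# qo = qoi * exp(-di * t); b = 1 gives harmonic decline.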
def _zero_function(di, t, qoi, b, uor):
"""Solve di to yield uor total barrels"""
qo = _decline(di, t, qoi, b)
return qo.sum() - uor
class Profiles:
"""Represents aggregated production profiles"""
def __init__(self):
self.curves = pd.DataFrame()
def add(self, well):
# well_curves = Profile(well=well).curves
t = pd.Series(range(0, well.active_period))
if isinstance(well, OilWell):
di = OIL_WELL_INITIAL_DI
b_oil = well.b_oil
if well.is_new_well:
initial_oil_rate = well.initial_oil_rate
ultimate_oil_recovery = well.ultimate_oil_recovery
else:
initial_oil_rate = well.oil_rate
ultimate_oil_recovery = well.ultimate_oil_recovery - well.oil_cumulative
# calculate di_oil
di_oil = optimize.newton(
_zero_function,
di, args=(
t,
initial_oil_rate,
b_oil,
ultimate_oil_recovery))
# generate oil curve
qo = _decline(
di_oil,
t,
initial_oil_rate,
b_oil)
# generate gas curve
gor = well.gas_oil_ratio[0] + (
well.gas_oil_ratio[1] - well.gas_oil_ratio[0]) * t / well.active_period
qg = qo * gor
# generate condensate curve
qc = pd.Series(np.zeros(well.active_period))
else: # it's a gas well
# calculate di_gas
di_gas = optimize.newton(
_zero_function,
GAS_WELL_INITIAL_DI, args=(
t,
well.initial_gas_rate,
well.b_gas,
well.ultimate_gas_recovery))
# generate gas curve
qg = _decline(
di_gas,
t,
well.initial_gas_rate,
well.b_gas)
# generate condensate curve
qc = qg * well.gas_condensate_ratio
# generate oil curve
qo = pd.Series(np.zeros(well.active_period))
# pack into dataframe
well_dict = {
'asset': [well.asset.name.lower()] * well.active_period,
'pex': [well.pex.name.lower()] * well.active_period,
'whp': [well.whp.name.lower()] * well.active_period,
'well': [well.name.lower()] * well.active_period,
'date': | pd.date_range(well.start_date, periods=well.active_period) | pandas.date_range |
import argparse
import logging
import os
import tqdm
import numpy as np
import pandas as pd
from sys import getsizeof
def arg_parser():
description = ("Merge FCAS data in directories to parquet chunks.\n"
+ "Indexed on sorted datetime column to improve Dask speed")
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-path', type=str, required=True,
help='recursive search for files with format in path')
parser.add_argument('-format', type=str, required=True,
help='.{format} to search for. csv or parquet')
parser.add_argument('-memory_limit', type=int, required=True,
help=('memory (MB) before file write.'
+ ' Recommended RAM/2'))
args = parser.parse_args()
return args
def write_parquet(df_list, path, i):
concat_df = pd.concat(df_list)
concat_df = concat_df.sort_index()
chunk_name = path + os.sep + f'chunk{i}.parquet'
concat_df.to_parquet(chunk_name)
return chunk_name
def pathfiles_to_chunks(path, fformat, mem_limit):
read_files = walk_dirs_for_files(path, fformat)
concat_list = []
concat_df = pd.DataFrame()
i = 0
mem = 0
for file in tqdm.tqdm(read_files, desc='Reading file:'):
df = read_dataframes(fformat, file)
concat_list.append(df)
df_mem = getsizeof(df)
mem += df_mem / 1e6
if mem < mem_limit:
logging.info(f'Memory: {mem}')
elif mem >= mem_limit:
chunk_name = write_parquet(concat_list, path, i)
logging.info(f'Writing chunk {chunk_name}')
i += 1
concat_list = []
concat_df = pd.DataFrame()
mem = 0
final_len = len(concat_list)
if final_len > 0:
chunk_name = write_parquet(concat_list, path, i)
logging.info(f'Writing chunk {chunk_name}')
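# Rough sizing example (hypothetical numbers): with -memory_limit 2000 and CSVs that
# occupy about 250 MB each in memory, roughly eight files are concatenated into each
# parquet chunk before the buffer is written out and reset.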
def walk_dirs_for_files(path, fformat):
read_files = []
for root, subs, files in os.walk(path):
if files:
logging.info(f' Reading files in {root}')
flist = [root + os.sep + x for x in files if fformat in x.lower()]
if flist:
read_files.extend(flist)
if not read_files:
logging.error(' Check path and format. No files to read')
raise argparse.ArgumentError()
else:
return sorted(read_files)
def read_dataframes(fformat, path):
original_cols = ['TIMESTAMP', 'ELEMENTNUMBER', 'VARIABLENUMBER',
'VALUE', 'VALUEQUALITY']
cols = ['datetime', 'elementnumber', 'variablenumber',
'fcas_value', 'valuequality']
if fformat == 'csv':
df = pd.read_csv(path)
verified_cols = df.columns[df.columns.isin(original_cols)]
df = df[verified_cols]
if len(verified_cols) == 0:
df = | pd.read_csv(path, header=None) | pandas.read_csv |
import pandas as pd
import random
import itertools
def create_net_edges(start_node, end_node):
node1 = random.randint(start_node,end_node)
node2 = random.randint(start_node,end_node)
return node1, node2
def list_edges(n_edges, start_node, end_node):
edges = [(create_net_edges(start_node, end_node)) for x in range(n_edges)]
return edges
def create_sub_group(n_edges, start_node, end_node):
edge_list = list_edges(n_edges, start_node, end_node)
df = pd.DataFrame(edge_list)
return df
def create_network(n_nodes, n_subgroups):
start = 1
sub_length = n_nodes/n_subgroups
end = sub_length
n_edges = n_subgroups*100
net_df = | pd.DataFrame() | pandas.DataFrame |
"""
accounting.py
Accounting and Financial functions.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : <EMAIL>
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
import datetime
import numpy as np
import pandas as pd
from pf.constants import DAYS_IN_YEAR
from pf.util import get_age
################################################################################################################################
# Financial Statements
################################################################################################################################
def calc_balance(accounts=None, category_dict=None):
"""
Calculate daily balances of grouped assets/liabilities based on `category_dict`s from `accounts`, returns a DataFrame.
Balance sheet is split into these sections:
Assets
Current
Cash
...
Long Term
Investments
Property
...
Liabilities
Current
Credit Card
...
Long Term
Loans
...
categories = {
'Assets' : {
'Current': {
# User category keys and account DataFrame columns list for values
'Cash & Cash Equivalents': [
('Cash', 'BofA Checking'),
('Cash', 'BofA Savings'),
...
],
'User Category': [...]
...
},
'Long Term': {...}
},
'Liabilities' : {
'Current': {...},
'Long Term': {...}
}
}
"""
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
balance_dict = {
(k0, k1, k2): accounts[v2].sum(axis=1) if v2 else pd.Series(0, index=accounts.index)
for k0, v0 in category_dict.iteritems()
for k1, v1 in v0.iteritems()
for k2, v2 in v1.iteritems()
}
# Convert to DataFrame
balance = pd.DataFrame(balance_dict)
return balance.fillna(0.0)
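# Minimal usage sketch (hypothetical account names; assumes `accounts` is a DataFrame of
# daily balances whose columns match the selectors listed in `category_dict`):
#
#   categories = {'Assets': {'Current': {'Cash': [('Cash', 'Checking')]}},
#                 'Liabilities': {'Current': {'Credit Card': [('Credit', 'Visa')]}}}
#   balance = calc_balance(accounts, category_dict=categories)
#   # -> MultiIndex columns ('Assets', 'Current', 'Cash') and
#   #    ('Liabilities', 'Current', 'Credit Card'), one row per entry of accounts.index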
def balance_sheet(balance=None, period=datetime.datetime.now().year):
"""
Calculate and return a balance sheet.
Balance will be based on the last entry of account data (e.g. December 31st) for the given `period` time period,
which defaults to the current year.
All levels may be user defined by the category dictionary. The value of the last level must contain valid pandas DataFrame
column selectors, e.g. `Account Type` for single index column / level 0 access or `('Cash', 'Account Name')` for
multilevel indexing.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
balance = calc_balance(accounts, category_dict=categories)
balancesheet = balance_sheet(balance, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
balance_sheets = []
for p in period:
# Force period to string
p = str(p)
# Take the last balance entry of the period and convert to a statement DataFrame
p_balance = pd.DataFrame(balance[p].iloc[-1])
p_balance.columns = ['$']
p_balance.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_balance[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
balance_df = pd.concat([p_balance, net])
# Calculate percentages of level 0
balance_df['%'] = 100.0 * balance_df.div(balance_df.sum(level=0), level=0)
# Calculate hierarchical totals
l1_totals = balance_df.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = balance_df.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
balance_df = balance_df.combine_first(l1_totals)
balance_df = balance_df.combine_first(l0_totals)
# Update columns with period
balance_df.columns = pd.MultiIndex.from_product([[p], balance_df.columns])
# Add to main list
balance_sheets.append(balance_df)
# Concatenate all the periods together
balance_sheets_df = | pd.concat(balance_sheets, 1) | pandas.concat |
from logging import NullHandler
from numpy.__config__ import show
from pkg_resources import yield_lines
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from streamlit_echarts import st_echarts
import requests
import json
from pyvis import network as net
from stvis import pv_static
import io
import collections
from sklearn import preprocessing
import base64
from io import BytesIO
import os
from PIL import Image
import webbrowser
# download function
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=True)
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
href = f'<a href="data:file/csv;base64,{b64}">Download data</a>'
return href
# function to generate a data frame of gene symbol and openTargets association score
def opentargets_gene_score(disease_name):
# Set base URL of GraphQL API endpoint
base_url = "https://api.platform.opentargets.org/api/v4/graphql"
# query disease id via GraphQL API
query_string1 = """
query searchDiseaseID($diseaseName: String!, $entityNames: [String!]) {
search(queryString: $diseaseName, entityNames: $entityNames ) {
total
hits{
id
name
}
}
}
"""
query_string2 = """
query associatedTargets($diseaseID: String!) {
disease(efoId: $diseaseID) {
id
name
associatedTargets(page: { index: 0, size: 300 }) {
count
rows {
target {
approvedSymbol
}
score
}
}
}
}
"""
# Set variables object of arguments to be passed to endpoint
variables = {"diseaseName": disease_name, "entityNames": ["disease"]}
# Perform POST request and check status code of response
r = requests.post(base_url, json={"query": query_string1, "variables": variables})
try:
df = pd.json_normalize(r.json()["data"]["search"]["hits"])
disease_id = df.loc[df["name"].str.lower() == disease_name.lower(), "id"].values[0]
variables = {"diseaseID": disease_id}
r = requests.post(base_url, json={"query": query_string2, "variables": variables})
gene_scoreDF = pd.json_normalize(r.json()["data"]["disease"]["associatedTargets"]["rows"])
gene_scoreDF = gene_scoreDF.rename({
"score": "opentargets_associations",
"target.approvedSymbol": "gene_symbol"
}, axis = 1)
except:
gene_scoreDF = []
return gene_scoreDF
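# Illustrative call (hypothetical disease name; requires network access to the
# Open Targets GraphQL endpoint):
#   gene_scores = opentargets_gene_score("asthma")
# On success this is a DataFrame with 'gene_symbol' and 'opentargets_associations'
# columns (at most 300 targets, per the page size in the query); on any failure the
# function falls back to an empty list.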
def proteins_interaction(input_protein):
string_api_url = "https://string-db.org/api"
output_format = "tsv"
method = "network"
request_url = "/".join([string_api_url, output_format, method])
params = {
"identifiers" : "%0d".join(input_protein), # your protein in a list
"species" : 9606, # species NCBI identifier
"caller_identity" : "stargazer" # your app name
}
response = requests.post(request_url, data=params)
r = response.content
rawData = pd.read_csv(io.StringIO(r.decode('utf-8')), sep = "\t")
rawData_drop = rawData.drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
return rawData_drop
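# Illustrative call (hypothetical gene symbols; queries the live STRING API):
#   interactions = proteins_interaction(["TP53", "MDM2"])
# The result is STRING's TSV network response parsed into a DataFrame and
# de-duplicated, one row per reported interaction between the query proteins.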
def go_enrichment(input_gene):
string_api_url = "https://string-db.org/api"
output_format = "tsv"
method = "enrichment"
request_url = "/".join([string_api_url, output_format, method])
params = {
"identifiers" : "%0d".join(input_gene), # your protein
"species" : 9606, # species NCBI identifier
"caller_identity" : "stargazer" # your app name
}
response = requests.post(request_url, data=params)
r = response.content
rawData = pd.read_csv(io.StringIO(r.decode('utf-8')), sep = "\t")
return rawData
## main page set up
st.set_page_config(layout="wide", page_title="StarGazer")
# loading logo
col1, col2 = st.columns((4, 1))
path = os.getcwd()
logo = Image.open(path + "/assets/logo.png")
col2.image(logo, output_format = "PNG", width = 200)
# tile of the dashboard and description
st.markdown("***")
st.title("StarGazer: Multi-omics evidence-based drug target prioritization")
select = st.sidebar.selectbox('Search by', ["--", 'Gene', 'Variant', 'PheWAS', 'GWAS', 'GWAS_PheWAS Union', 'GWAS_PheWAS Intersection', "Protein-protein Interaction", 'Disease Target Prioritization'], key='1')
if 'count' not in st.session_state:
st.session_state.count = 0
# initialising app
path = os.getcwd()
# import dataset
df = pd.read_csv(path + "/assets/phewas-catalog.csv")
# fill missing gene names with "UNKNOWN"
df['gene_name'] = df['gene_name'].fillna("UNKNOWN")
df_selected = df[["gene_name", "snp", "phewas phenotype", "p-value", "odds-ratio", "gwas-associations"]]
# Adding COVID-19 module
full_url = "https://www.ebi.ac.uk/gwas/rest/api/efoTraits/MONDO_0100096/associations?projection=associationByEfoTrait"
r = requests.get(full_url, json={})
if r.status_code == 200:
COVID_df = pd.json_normalize(r.json()["_embedded"]["associations"], ["snps", ["genomicContexts"]], ["orPerCopyNum", "pvalue"], errors = "ignore")[["gene.geneName", "pvalue", "_links.snp.href", "orPerCopyNum"]].drop_duplicates(keep = "first")
COVID_df["_links.snp.href"] = COVID_df["_links.snp.href"].str.strip("{?projection}").str.split("Polymorphisms/").str[1]
COVID_df = COVID_df.loc[COVID_df["orPerCopyNum"].notna(), :].reset_index(drop = True)
COVID_df["orPerCopyNum"] = COVID_df["orPerCopyNum"].astype(float)
COVID_df = COVID_df.rename({
"gene.geneName": "gene_name",
"pvalue": "p-value",
"_links.snp.href": "snp",
"orPerCopyNum": "odds-ratio"
}, axis = 1)
COVID_df[["phewas phenotype", "gwas-associations"]] = "COVID-19"
df_selected = df_selected.append(COVID_df).reset_index(drop = True)
df_selected.to_csv(path + "/assets/df_selected.csv")
# extract data from Pharos
query_string = """
query AllTargets {
targets(
filter: {
facets: [{
facet: "Target Development Level",
values: ["Tclin", "Tchem", "Tbio", "Tdark"]
}]
}
) {
targets (top : 100000) {
sym
tdl
}
}
}
"""
r = requests.post("https://pharos-api.ncats.io/graphql", json={"query": query_string})
if r.status_code == 200:
df_druggable = pd.DataFrame(r.json()["data"]["targets"]["targets"]).drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
df_druggable.to_csv(path + "/assets/df_druggable.csv")
if select == "Gene":
path = os.getcwd()
df_selected = pd.read_csv(path + "/assets/df_selected.csv")
df_druggable = pd.read_csv(path + "/assets/df_druggable.csv")
st.markdown("This dashboard shows the associated phenotypes of your genes of interest.")
# sidebar -- gene & variant select boxes
gene = sorted(df_selected["gene_name"].unique().tolist())
select_gene = st.sidebar.selectbox('Gene', gene, key='2')
variant = sorted(df_selected[df_selected["gene_name"] == select_gene]["snp"].unique().tolist())
select_variant = st.sidebar.selectbox('Variant', variant, key='3')
# subset the data frame
df_variant = df_selected[df_selected["snp"] == select_variant]
# sidebar -- p-value slider
df_variant.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
select_p = st.sidebar.text_input(label = "P-value", help = "Defaults to p = 0.05. Accepts scientific notation, e.g., 5E-4, 3e-9", value = "0.05")
try:
if (float(select_p) <= 1) & (float(select_p) > 0):
select_p = float(select_p)
else:
select_p = 0.05
except:
select_p = 0.05
# filter by p-value and split into risk / protective phenotypes
df_variant_p = df_variant[df_variant["p-value"] <= select_p]
df_variant_p_des = df_variant_p[df_variant_p["odds-ratio"] >= 1]
df_variant_p_des = df_variant_p_des[["phewas phenotype", "gene_name", "snp", "odds-ratio", "p-value", "gwas-associations"]].reset_index().drop("index", axis= 1)
df_variant_p_pro = df_variant_p[df_variant_p["odds-ratio"] < 1]
df_variant_p_pro = df_variant_p_pro[["phewas phenotype", "gene_name", "snp", "odds-ratio", "p-value", "gwas-associations"]].reset_index().drop("index", axis= 1)
st.header("Gene: " + "*" + select_gene + "*" + ", Variant: " + "*" + select_variant + "*" + ", P-value <= " + "*" + str(round(select_p, 4)) + "*")
with st.container():
col1, col2 = st.columns(2)
with col1:
st.subheader("Odds ratios of associated phenotypes")
df_variant_p.sort_values(by=['odds-ratio'], inplace=True, ascending=True)
fig = px.bar(df_variant_p, y = "phewas phenotype", x = "odds-ratio", color = "odds-ratio", color_continuous_scale = px.colors.sequential.RdBu_r, color_continuous_midpoint = 1, height= 1000)
#fig.add_vline(x = 1)
st.plotly_chart(fig, use_container_width= True)
with col2:
st.subheader("Data")
st.markdown(get_table_download_link(df_variant_p), unsafe_allow_html=True)
st.write('Risk allele-associated phenotypes (odds ratio > 1)')
st.dataframe(df_variant_p_des, height = 400)
st.write('Protective allele-associated phenotypes (odds ratio < 1)')
st.dataframe(df_variant_p_pro, height = 400)
elif select == "Variant":
path = os.getcwd()
df_selected = pd.read_csv(path + "/assets/df_selected.csv")
df_druggable = pd.read_csv(path + "/assets/df_druggable.csv")
st.markdown("This dashboard shows the associated phenotypes of your gene variants of interest.")
# sidebar -- variant select box
variant = sorted(df_selected["snp"].unique().tolist())
select_variant = st.sidebar.selectbox('Variant', variant, key='4')
# subset the data frame
df_variant = df_selected[df_selected["snp"] == select_variant]
# sidebar -- p-value slider
df_variant.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
select_p = st.sidebar.text_input(label = "P-value", help = "Defaults to p = 0.05. Accepts scientific notation, e.g., 5E-4, 3e-9", value = "0.05")
try:
if (float(select_p) <= 1) & (float(select_p) > 0):
select_p = float(select_p)
else:
select_p = 0.05
except:
select_p = 0.05
# filter by p-value and split into risk / protective phenotypes
df_variant_p = df_variant[df_variant["p-value"] <= select_p]
df_variant_p_des = df_variant_p[df_variant_p["odds-ratio"] >= 1]
df_variant_p_des = df_variant_p_des[["phewas phenotype", "gene_name", "snp", "odds-ratio", "p-value", "gwas-associations"]].reset_index().drop("index", axis= 1)
df_variant_p_pro = df_variant_p[df_variant_p["odds-ratio"] < 1]
df_variant_p_pro = df_variant_p_pro[["phewas phenotype", "gene_name", "snp", "odds-ratio", "p-value", "gwas-associations"]].reset_index().drop("index", axis= 1)
st.header("Variant: " + "*" + select_variant + "*" + ", P-value <= " + "*" + str(round(select_p, 4)) + "*")
st.write("Associated gene: " + ", ".join(df_variant_p["gene_name"].unique().tolist()))
with st.container():
col1, col2 = st.columns(2)
with col1:
st.subheader("Odds ratios of associated phenotypes")
df_variant_p.sort_values(by=['odds-ratio'], inplace=True, ascending=True)
fig = px.bar(df_variant_p, y = "phewas phenotype", x = "odds-ratio", color = "odds-ratio", color_continuous_scale = px.colors.sequential.RdBu_r, color_continuous_midpoint = 1, height= 1000)
st.plotly_chart(fig, use_container_width= True)
with col2:
st.subheader("Data")
st.markdown(get_table_download_link(df_variant_p), unsafe_allow_html=True)
st.write('Risk allele-associated phenotypes (odds ratio > 1)')
st.dataframe(df_variant_p_des, height = 400)
st.write('Protective allele-associated phenotypes (odds ratio < 1)')
st.dataframe(df_variant_p_pro, height = 400)
elif select == "GWAS":
path = os.getcwd()
df_selected = pd.read_csv(path + "/assets/df_selected.csv")
df_druggable = pd.read_csv(path + "/assets/df_druggable.csv")
st.markdown("This dashboard shows the gene variants found in GWASs that are associated with your diseases of interest. It displays all associations, before separating associations into risk and protective. ")
# extract diseases
disease = ["--"]
for i in list(set(df_selected["gwas-associations"].tolist())):
disease.extend(i.split(", "))
disease = sorted(list(set(disease)))
# sidebar -- disease select box
select_disease = st.sidebar.selectbox('Disease', disease, key='5')
# subset the data frame for GWAS-associations
df_disease_gwas = df_selected[df_selected["gwas-associations"].str.contains(select_disease)]
df_disease_gwas = df_disease_gwas[["snp", "gene_name", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations"]]
# sidebar -- p-value slider
select_p = st.sidebar.text_input(label = "P-value", help = "Defaults to p = 0.05. Accepts scientific notation, e.g., 5E-4, 3e-9", value = "0.05")
try:
if (float(select_p) <= 1) & (float(select_p) > 0):
select_p = float(select_p)
else:
select_p = 0.05
except:
select_p = 0.05
df_disease_gwas = df_disease_gwas[df_disease_gwas["p-value"] <= select_p]
# find the druggable genes
# druggable evidence
df_disease_gwas = pd.merge(df_disease_gwas, df_druggable, left_on='gene_name', right_on = "sym", how='left')
df_disease_gwas['tdl'] = df_disease_gwas['tdl'].fillna("None")
df_disease_gwas = df_disease_gwas.rename(columns={'tdl': 'druggability level'})
# subset the data by odds ratio
df_disease_gwas = df_disease_gwas.reset_index().drop("index", axis= 1)
df_disease_gwas_sub_des = df_disease_gwas[df_disease_gwas["odds-ratio"] >= 1]
df_disease_gwas_sub_pro = df_disease_gwas[df_disease_gwas["odds-ratio"] < 1]
# count druggability levels per gene for the pie charts (risk, protective and all alleles)
df_tdl_des = df_disease_gwas_sub_des[["gene_name", "druggability level"]].drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
df_tdl_pro = df_disease_gwas_sub_pro[["gene_name", "druggability level"]].drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
df_tdl_all = df_disease_gwas[["gene_name", "druggability level"]].drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
st.header("Disease: " + "*" + select_disease + "*" + ", P-value <= " + "*" + str(round(select_p, 4)) + "*")
st.subheader("All alleles")
with st.container():
col1, col2= st.columns([1,1])
with col1:
st.write("Percentage of genes in each druggability level:")
elements_count = collections.Counter(df_tdl_all["druggability level"].tolist())
gene_count = collections.Counter(df_tdl_all["gene_name"].tolist())
df_disease_phewas_gwas_pie = pd.DataFrame(dict(elements_count).items(), columns= ["druggability level", "value"])
gene_count_phewas_gwas_pie = pd.DataFrame(dict(gene_count).items(), columns= ["gene", "value"])
labels = ["None", "Tdark", "Tbio", "Tchem", "Tclin"]
df_disease_phewas_gwas_pie["druggability level"] = pd.Categorical(df_disease_phewas_gwas_pie["druggability level"], labels)
fig = px.pie(df_disease_phewas_gwas_pie.sort_values("druggability level").reset_index(drop = True), values='value', names='druggability level', color='druggability level', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"})
fig.update_traces(textposition='inside', textinfo='percent+label', showlegend=False, sort=False, rotation=0)
st.plotly_chart(fig)
with col2:
fig = px.scatter(df_disease_gwas, x = "gene_name", y = "odds-ratio", color = "druggability level", hover_name='snp', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"},
category_orders = {
"druggability level": ["None", "Tdark", "Tbio", "Tchem", "Tclin"]})
# size = [20]*len(df_disease_phewas_gwas),
fig.add_hline(y = 1, line_width=1)
st.plotly_chart(fig, use_container_width= True)
with st.expander("See data"):
st.write("GWAS data:")
df_disease_gwas = df_disease_gwas[["gene_name", "snp", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations", "druggability level"]]
df_disease_gwas.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
df_disease_gwas = df_disease_gwas.reset_index().drop("index", axis= 1)
st.markdown(get_table_download_link(df_disease_gwas.reset_index()), unsafe_allow_html=True)
st.write(df_disease_gwas)
st.subheader("Risk alleles (odds ratio > 1)")
with st.container():
col1, col2= st.columns([1,1])
with col1:
st.write("Percentage of genes in each druggability level:")
elements_count = collections.Counter(df_tdl_des["druggability level"].tolist())
df_disease_phewas_gwas_sub_des_pie = pd.DataFrame(dict(elements_count).items(), columns= ["druggability level", "value"])
labels = ["None", "Tdark", "Tbio", "Tchem", "Tclin"]
df_disease_phewas_gwas_sub_des_pie["druggability level"] = pd.Categorical(df_disease_phewas_gwas_sub_des_pie["druggability level"], labels)
fig = px.pie(df_disease_phewas_gwas_sub_des_pie.sort_values("druggability level").reset_index(drop = True), values='value', names='druggability level', color='druggability level', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"})
fig.update_traces(textposition='inside', textinfo='percent+label', showlegend=False, sort=False, rotation=0)
st.plotly_chart(fig)
with col2:
fig = px.scatter(df_disease_gwas_sub_des, x = "gene_name", y = "odds-ratio", color = "druggability level", hover_name='snp', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"},
category_orders = {
"druggability level": ["None", "Tdark", "Tbio", "Tchem", "Tclin"]})
# size = [20]*len(df_disease_phewas_gwas),
fig.add_hline(y = 1, line_width=1)
st.plotly_chart(fig, use_container_width= True)
with st.expander("See data"):
st.write("GWAS data (Odds-ratio > 1):")
df_disease_gwas_sub_des = df_disease_gwas_sub_des[["gene_name", "snp", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations", "druggability level"]]
df_disease_gwas_sub_des.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
df_disease_gwas_sub_des = df_disease_gwas_sub_des.reset_index().drop("index", axis= 1)
st.markdown(get_table_download_link(df_disease_gwas_sub_des.reset_index()), unsafe_allow_html=True)
st.write(df_disease_gwas_sub_des)
st.subheader("Protective alleles (odds ratio < 1)")
with st.container():
col1, col2= st.columns([1,1])
with col1:
st.write("Percentage of genes in each druggability level:")
elements_count = collections.Counter(df_tdl_pro["druggability level"].tolist())
df_disease_phewas_gwas_sub_pro_pie = pd.DataFrame(dict(elements_count).items(), columns= ["druggability level", "value"])
labels = ["None", "Tdark", "Tbio", "Tchem", "Tclin"]
df_disease_phewas_gwas_sub_pro_pie["druggability level"] = pd.Categorical(df_disease_phewas_gwas_sub_pro_pie["druggability level"], labels)
fig = px.pie(df_disease_phewas_gwas_sub_pro_pie.sort_values("druggability level").reset_index(drop = True), values='value', names='druggability level', color='druggability level', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"})
fig.update_traces(textposition='inside', textinfo='percent+label', showlegend=False, sort=False, rotation=0)
st.plotly_chart(fig)
with col2:
fig = px.scatter(df_disease_gwas_sub_pro, x = "gene_name", y = "odds-ratio", color = "druggability level", hover_name='snp', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"},
category_orders = {
"druggability level": ["None", "Tdark", "Tbio", "Tchem", "Tclin"]})
# size = [20]*len(df_disease_phewas_gwas),
fig.add_hline(y = 1, line_width=1)
st.plotly_chart(fig, use_container_width= True)
with st.expander("See data"):
st.write("GWAS data (Odds-ratio < 1):")
df_disease_gwas_sub_pro = df_disease_gwas_sub_pro[["gene_name", "snp", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations", "druggability level"]]
df_disease_gwas_sub_pro.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
df_disease_gwas_sub_pro = df_disease_gwas_sub_pro.reset_index().drop("index", axis= 1)
st.markdown(get_table_download_link(df_disease_gwas_sub_pro.reset_index()), unsafe_allow_html=True)
st.write(df_disease_gwas_sub_pro)
elif select == "PheWAS":
path = os.getcwd()
df_selected = pd.read_csv(path + "/assets/df_selected.csv")
df_druggable = pd.read_csv(path + "/assets/df_druggable.csv")
st.markdown("This dashboard shows the gene variants found in PheWASs that are associated with your diseases of interest. It displays all associations, before separating associations into risk and protective.")
# extract diseases
disease = ["--"]
disease.extend(df_selected["phewas phenotype"].tolist())
disease = sorted(list(set(disease)))
# sidebar -- disease select box
select_disease = st.sidebar.selectbox('Disease', disease, key='6')
# subset the data frame for PheWAS
df_disease_phewas = df_selected[df_selected["phewas phenotype"].str.contains(select_disease)]
# subset the data frame for phewas phenotype
df_disease_phewas = df_disease_phewas[["snp", "gene_name", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations"]]
# sidebar -- p-value slider
select_p = st.sidebar.text_input(label = "P-value", help = "Defaults to p = 0.05. Accepts scientific notation, e.g., 5E-4, 3e-9", value = "0.05")
try:
if (float(select_p) <= 1) & (float(select_p) > 0):
select_p = float(select_p)
else:
select_p = 0.05
except:
select_p = 0.05
df_disease_phewas = df_disease_phewas[df_disease_phewas["p-value"] <= select_p]
# find the druggable genes
# druggable evidence
df_disease_phewas = pd.merge(df_disease_phewas, df_druggable, left_on='gene_name', right_on = "sym", how='left')
df_disease_phewas['tdl'] = df_disease_phewas['tdl'].fillna("None")
df_disease_phewas = df_disease_phewas.rename(columns={'tdl': 'druggability level'})
# subset the data by odds ratio
df_disease_phewas = df_disease_phewas.reset_index().drop("index", axis= 1)
df_disease_phewas_sub_des = df_disease_phewas[df_disease_phewas["odds-ratio"] >= 1]
df_disease_phewas_sub_pro = df_disease_phewas[df_disease_phewas["odds-ratio"] < 1]
# count druggability levels per gene for the pie charts (risk, protective and all alleles)
df_tdl_des = df_disease_phewas_sub_des[["gene_name", "druggability level"]].drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
df_tdl_pro = df_disease_phewas_sub_pro[["gene_name", "druggability level"]].drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
df_tdl_all = df_disease_phewas[["gene_name", "druggability level"]].drop_duplicates(keep = 'first').reset_index().drop('index', axis=1)
st.header("Disease: " + "*" + select_disease + "*" + ", P-value <= " + "*" + str(round(select_p, 4)) + "*")
st.subheader("All alleles")
with st.container():
col1, col2= st.columns([1,1])
with col1:
st.write("Percentage of genes in each druggability level:")
elements_count = collections.Counter(df_tdl_all["druggability level"].tolist())
df_disease_phewas_gwas_pie = pd.DataFrame(dict(elements_count).items(), columns= ["druggability level", "value"])
labels = ["None", "Tdark", "Tbio", "Tchem", "Tclin"]
df_disease_phewas_gwas_pie["druggability level"] = pd.Categorical(df_disease_phewas_gwas_pie["druggability level"], labels)
fig = px.pie(df_disease_phewas_gwas_pie.sort_values("druggability level").reset_index(drop = True), values='value', names='druggability level', color='druggability level', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"})
fig.update_traces(textposition='inside', textinfo='percent+label', showlegend=False, sort=False, rotation=0)
st.plotly_chart(fig)
with col2:
fig = px.scatter(df_disease_phewas, x = "gene_name", y = "odds-ratio", color = "druggability level", hover_name='snp', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"},
category_orders = {
"druggability level": ["None", "Tdark", "Tbio", "Tchem", "Tclin"]})
# size = [20]*len(df_disease_phewas_gwas),
fig.add_hline(y = 1, line_width=1)
st.plotly_chart(fig, use_container_width= True)
with st.expander("See data"):
st.write("PheWAS data:")
df_disease_phewas = df_disease_phewas[["gene_name", "snp", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations", "druggability level"]]
df_disease_phewas.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
df_disease_phewas = df_disease_phewas.reset_index().drop("index", axis= 1)
st.markdown(get_table_download_link(df_disease_phewas.reset_index()), unsafe_allow_html=True)
st.write(df_disease_phewas)
st.subheader("Risk alleles (odds ratio > 1)")
with st.container():
col1, col2= st.columns([1,1])
with col1:
st.write("Percentage of genes in each druggability level:")
elements_count = collections.Counter(df_tdl_des["druggability level"].tolist())
df_disease_phewas_gwas_sub_des_pie = pd.DataFrame(dict(elements_count).items(), columns= ["druggability level", "value"])
labels = ["None", "Tdark", "Tbio", "Tchem", "Tclin"]
df_disease_phewas_gwas_sub_des_pie["druggability level"] = pd.Categorical(df_disease_phewas_gwas_sub_des_pie["druggability level"], labels)
fig = px.pie(df_disease_phewas_gwas_sub_des_pie.sort_values("druggability level").reset_index(drop = True), values='value', names='druggability level', color='druggability level', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"})
fig.update_traces(textposition='inside', textinfo='percent+label', showlegend=False, sort=False, rotation=0)
st.plotly_chart(fig)
with col2:
fig = px.scatter(df_disease_phewas_sub_des, x = "gene_name", y = "odds-ratio", color = "druggability level", hover_name='snp', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"},
category_orders = {
"druggability level": ["None", "Tdark", "Tbio", "Tchem", "Tclin"]})
# size = [20]*len(df_disease_phewas_gwas),
fig.add_hline(y = 1, line_width=1)
st.plotly_chart(fig, use_container_width= True)
with st.expander("See data"):
st.write("PheWAS data (Odds-ratio >= 1):")
df_disease_phewas_sub_des = df_disease_phewas_sub_des[["gene_name", "snp", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations", "druggability level"]]
df_disease_phewas_sub_des.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
df_disease_phewas_sub_des = df_disease_phewas_sub_des.reset_index().drop("index", axis= 1)
st.markdown(get_table_download_link(df_disease_phewas_sub_des.reset_index()), unsafe_allow_html=True)
st.write(df_disease_phewas_sub_des)
st.subheader("Protective alleles (odds ratio < 1)")
with st.container():
col1, col2= st.columns([1,1])
with col1:
st.write("Percentage of genes in each druggability level:")
elements_count = collections.Counter(df_tdl_pro["druggability level"].tolist())
df_disease_phewas_gwas_sub_pro_pie = pd.DataFrame(dict(elements_count).items(), columns= ["druggability level", "value"])
labels = ["None", "Tdark", "Tbio", "Tchem", "Tclin"]
df_disease_phewas_gwas_sub_pro_pie["druggability level"] = pd.Categorical(df_disease_phewas_gwas_sub_pro_pie["druggability level"], labels)
fig = px.pie(df_disease_phewas_gwas_sub_pro_pie.sort_values("druggability level").reset_index(drop = True), values='value', names='druggability level', color='druggability level', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"})
fig.update_traces(textposition='inside', textinfo='percent+label', showlegend=False, sort=False, rotation=0)
st.plotly_chart(fig)
with col2:
fig = px.scatter(df_disease_phewas_sub_pro, x = "gene_name", y = "odds-ratio", color = "druggability level", hover_name='snp', color_discrete_map = {
"None": "rgb(203,213,232)",
"Tdark": "rgb(141,160,203)",
"Tbio": "rgb(223,217,164)",
"Tchem": "rgb(229,134,6)",
"Tclin": "#DC3912"},
category_orders = {
"druggability level": ["None", "Tdark", "Tbio", "Tchem", "Tclin"]})
# size = [20]*len(df_disease_phewas_gwas),
fig.add_hline(y = 1, line_width=1)
st.plotly_chart(fig, use_container_width= True)
with st.expander("See data"):
st.write("PheWAS data (Odds-ratio < 1):")
df_disease_phewas_sub_pro = df_disease_phewas_sub_pro[["gene_name", "snp", "odds-ratio", "p-value", "phewas phenotype", "gwas-associations", "druggability level"]]
df_disease_phewas_sub_pro.sort_values(by=['odds-ratio'], inplace=True, ascending=False)
df_disease_phewas_sub_pro = df_disease_phewas_sub_pro.reset_index().drop("index", axis= 1)
st.markdown(get_table_download_link(df_disease_phewas_sub_pro.reset_index()), unsafe_allow_html=True)
st.write(df_disease_phewas_sub_pro)
elif select == "GWAS_PheWAS Union":
path = os.getcwd()
df_selected = pd.read_csv(path + "/assets/df_selected.csv")
df_druggable = | pd.read_csv(path + "/assets/df_druggable.csv") | pandas.read_csv |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# The 'distributed' default index does not match pandas' 0..n-1 range,
# so drop it before comparing.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
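# Re-run the conversion with Arrow disabled to also cover the non-Arrow
# createDataFrame path.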
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
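# compute.ordered_head makes head() respect the natural row order, so the
# no-argument result should be deterministic and comparable with pandas.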
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
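# The rename should also propagate to the underlying Spark schema.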
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
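# MultiIndex column labels are flattened to strings like "(A, 0)" in the
# Spark schema.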
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
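# Lowering compute.isin_limit below the number of labels exercises the
# join-based filtering path instead of Column.isin.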
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
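# Shared dropna checks parameterized by axis; driven by the axis=0 and
# axis=1 tests below.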
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
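# Transposed so the same data exercises dropping along columns (axis=1).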
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
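# A Series value is aligned against the column labels, filling each column
# with its matching entry.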
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
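# The expected text below mirrors the message raised by pandas-on-Spark
# verbatim (including its wording).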
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
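# approx=True relies on Spark's approximate distinct count, so the result
# may deviate from the exact value; a smaller rsd tightens the estimate.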
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert axis=1 is not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
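# Every placeholder on _MissingPandasLikeDataFrame should raise
# PandasNotImplementedError with a message naming the missing member.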
def test_missing(self):
psdf = self.psdf
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
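# to_numpy() collects the whole frame to the driver; it should match pdf.values.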
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# np.array([4, "six"]) coerces the values to strings, which pandas appears to
# mishandle; compare against the list-based pandas result instead.
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
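# pandas 1.2 changed how isin treats None/NaN, so older versions are
# compared against a hard-coded expected frame.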
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
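# Join output order is not deterministic in Spark, so both results are
# sorted and re-indexed before comparison.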
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
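# Both sides are sliced from the same pandas-on-Spark frame, so the merge is
# effectively a self-join on a shared anchor.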
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]])
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the result because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
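# Helper: build matching pandas / pandas-on-Spark frame pairs for update(); the optional
# column arguments are used below to relabel the frames with MultiIndex columns.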
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Skip the columns comparison by resetting the index
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
# The results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
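# Besides the pivoted values, also compare the resulting index and its repr
# (regression check for the issue linked above).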
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
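# MultiIndex columns whose level values are not sorted, with boolean data.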
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
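# The cases below check the exception type and, where applicable, the expected error message.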
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
# TODO: what if with random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
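# Re-run the same checks with the "compute.max_rows" limit disabled to cover that path too.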
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
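# Shared checks for the cumulative operators: compare against pandas with the default
# skipna, with skipna=False, and on the sum of the cumulative result. The _test_cum*
# helpers below all follow this pattern.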
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
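# Keep a reference to column 'a' before dropping so we can check that the inplace
# drop is also reflected in the previously obtained Series.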
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
# Reindexing single Index on single Index
data2 = [[5.0, None], [6.0, 7.0], [8.0, None]]
index2 = pd.Index(["A", "C", "D"], name="index2")
columns2 = pd.Index(["numbers", "F"], name="cols2")
pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]})
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(),
psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"]
)
pdf2 = pd.DataFrame(data=data2, index=index2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf))
# Reindexing MultiIndex on MultiIndex
columns2 = pd.MultiIndex.from_tuples(
[("numbers", "third"), ("values", "second")], names=["cols3", "cols4"]
)
pdf2.columns = columns2
psdf2.columns = columns2
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"]
)
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
def test_melt(self):
pdf = pd.DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars="A").sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["C"])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname")
.sort_values(["myVarname", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname"
).sort_values(["myVarname", "myValname"]),
)
self.assert_eq(
psdf.melt(value_vars=("A", "B"))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z"))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z"))
# multi-index columns
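# Arbitrary float labels for the first level of the MultiIndex columns
# (note that, despite its name, TWELVE is bound to 20.0).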
TEN = 10.0
TWELVE = 20.0
columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWELVE, "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable_0", "variable_1", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.rename(columns=name_like_string),
)
columns.names = ["v0", "v1"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["v0", "v1", "value"]),
)
self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A")))
self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A")))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN]))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWELVE, "A")]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWELVE]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWELVE, "A")]))
# non-string names
pdf.columns = [10.0, 20.0, 30.0]
psdf.columns = [10.0, 20.0, 30.0]
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=10.0).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0, 20.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0], value_vars=[30.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(value_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
def test_all(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.all(), pdf.all())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.all(axis=1)
def test_any(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.any(), pdf.any())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.any(axis=1)
def test_rank(self):
pdf = pd.DataFrame(
data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]},
columns=["col1", "col2"],
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
self.assert_eq(pdf.rank().sum(), psdf.rank().sum())
self.assert_eq(
pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index()
)
self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index())
self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index())
self.assert_eq(
pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index()
)
self.assert_eq(
pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index()
)
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psdf.rank(method="nothing")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
def test_round(self):
pdf = pd.DataFrame(
{
"A": [0.028208, 0.038683, 0.877076],
"B": [0.992815, 0.645646, 0.149370],
"C": [0.173891, 0.577595, 0.491027],
},
columns=["A", "B", "C"],
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 0, 2], index=["A", "B", "C"])
psser = ps.Series([1, 0, 2], index=["A", "B", "C"])
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2}))
self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
msg = "decimals must be an integer, a dict-like or a Series"
with self.assertRaisesRegex(TypeError, msg):
psdf.round(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
pser = pd.Series([1, 0, 2], index=columns)
psser = ps.Series([1, 0, 2], index=columns)
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(
pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2})
)
self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
# non-string names
pdf = pd.DataFrame(
{
10: [0.028208, 0.038683, 0.877076],
20: [0.992815, 0.645646, 0.149370],
30: [0.173891, 0.577595, 0.491027],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2}))
def test_shift(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())
# Build the expected result manually since pandas 0.23 does not support the `fill_value` argument.
pdf1 = pd.DataFrame(
{"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
index=pdf.index,
)
self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.shift(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
def test_diff(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.diff(), psdf.diff())
self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.diff(1.5)
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.diff(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.diff(), psdf.diff())
def test_duplicated(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(keep="last").sort_index(),
psdf.duplicated(keep="last").sort_index(),
)
self.assert_eq(
pdf.duplicated(keep=False).sort_index(),
psdf.duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset="b").sort_index(),
psdf.duplicated(subset="b").sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=["b"]).sort_index(),
psdf.duplicated(subset=["b"]).sort_index(),
)
with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
psdf.duplicated(keep="false")
with self.assertRaisesRegex(KeyError, "'d'"):
psdf.duplicated(subset=["d"])
pdf.index.name = "x"
psdf.index.name = "x"
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
# multi-index
self.assert_eq(
pdf.set_index("a", append=True).duplicated().sort_index(),
psdf.set_index("a", append=True).duplicated().sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=("x", "b")).sort_index(),
psdf.duplicated(subset=("x", "b")).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=[("x", "b")]).sort_index(),
psdf.duplicated(subset=[("x", "b")]).sort_index(),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=10).sort_index(),
psdf.duplicated(subset=10).sort_index(),
)
def test_ffill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.ffill(), pdf.ffill())
self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1))
pser = pdf.y
psser = psdf.y
psdf.ffill(inplace=True)
pdf.ffill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[2]], pser[idx[2]])
def test_bfill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.bfill(), pdf.bfill())
self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1))
pser = pdf.x
psser = psdf.x
psdf.bfill(inplace=True)
pdf.bfill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[0]], pser[idx[0]])
def test_filter(self):
pdf = pd.DataFrame(
{
"aa": ["aa", "bd", "bc", "ab", "ce"],
"ba": [1, 2, 3, 4, 5],
"cb": [1.0, 2.0, 3.0, 4.0, 5.0],
"db": [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index("aa")
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
with option_context("compute.isin_limit", 0):
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=["ba", "db"], axis=1).sort_index(),
pdf.filter(items=["ba", "db"], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
pdf = pdf.set_index("ba", append=True)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0)
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psdf.filter(items=[(), ("bd", 2)], axis=0)
self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0))
self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0))
with self.assertRaisesRegex(ValueError, "items should be a list-like object"):
psdf.filter(items="b")
with self.assertRaisesRegex(ValueError, "No axis named"):
psdf.filter(regex="b.*", axis=123)
with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"):
psdf.filter()
with self.assertRaisesRegex(TypeError, "mutually exclusive"):
psdf.filter(regex="b.*", like="aaa")
# multi-index columns
pdf = pd.DataFrame(
{
("x", "aa"): ["aa", "ab", "bc", "bd", "ce"],
("x", "ba"): [1, 2, 3, 4, 5],
("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0],
("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index(("x", "aa"))
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
def test_pipe(self):
psdf = ps.DataFrame(
{"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]},
columns=["category", "col1", "col2"],
)
self.assertRaisesRegex(
ValueError,
"arg is both the pipe target and a keyword argument",
lambda: psdf.pipe((lambda x: x, "arg"), arg="1"),
)
def test_transform(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=2).sort_index(),
pdf.transform(lambda x, y: x + y, y=2).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=1).sort_index(),
pdf.transform(lambda x, y: x + y, y=1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.transform(1)
# multi-index columns
columns = | pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) | pandas.MultiIndex.from_tuples |
"""
A minimalistic version helper in the spirit of versioneer, that is able to run without build step using pkg_resources.
Developed by <NAME>, see https://github.com/flying-sheep/get_version.
"""
# __version__ is defined at the very end of this file.
import re
import os
from pathlib import Path
from subprocess import run, PIPE, CalledProcessError
from typing import NamedTuple, List, Union, Optional
RE_VERSION = r"([\d.]+?)(?:\.dev(\d+))?(?:[_+-]([0-9a-zA-Z.]+))?"
RE_GIT_DESCRIBE = r"v?(?:([\d.]+)-(\d+)-g)?([0-9a-f]{7})(-dirty)?"
ON_RTD = os.environ.get("READTHEDOCS") == "True"
def match_groups(regex, target):
match = re.match(regex, target)
if match is None:
raise re.error(f"Regex does not match “{target}”. RE Pattern: {regex}", regex)
return match.groups()
class Version(NamedTuple):
release: str
dev: Optional[str]
labels: List[str]
@staticmethod
def parse(ver):
release, dev, labels = match_groups(f"{RE_VERSION}$", ver)
return Version(release, dev, labels.split(".") if labels else [])
def __str__(self):
release = self.release if self.release else "0.0"
dev = f".dev{self.dev}" if self.dev else ""
labels = f'+{".".join(self.labels)}' if self.labels else ""
return f"{release}{dev}{labels}"
def get_version_from_dirname(name, parent):
"""Extracted sdist"""
parent = parent.resolve()
re_dirname = re.compile(f"{name}-{RE_VERSION}$")
if not re_dirname.match(parent.name):
return None
return Version.parse(parent.name[len(name) + 1 :])
def get_version_from_git(parent):
parent = parent.resolve()
try:
p = run(
["git", "rev-parse", "--show-toplevel"],
cwd=str(parent),
stdout=PIPE,
stderr=PIPE,
encoding="utf-8",
check=True,
)
except (OSError, CalledProcessError):
return None
if Path(p.stdout.rstrip("\r\n")).resolve() != parent.resolve():
return None
p = run(
[
"git",
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"v[0-9]*",
],
cwd=str(parent),
stdout=PIPE,
stderr=PIPE,
encoding="utf-8",
check=True,
)
release, dev, hex_, dirty = match_groups(f"{RE_GIT_DESCRIBE}$", p.stdout.rstrip("\r\n"))
labels = []
if dev == "0":
dev = None
else:
labels.append(hex_)
if dirty and not ON_RTD:
labels.append("dirty")
return Version(release, dev, labels)
def get_version_from_metadata(name: str, parent: Optional[Path] = None):
try:
from pkg_resources import get_distribution, DistributionNotFound
except ImportError:
return None
try:
pkg = get_distribution(name)
except DistributionNotFound:
return None
# For an installed package, the parent is the install location
path_pkg = Path(pkg.location).resolve()
if parent is not None and path_pkg != parent.resolve():
msg = f"""\
metadata: Failed; distribution and package paths do not match:
{path_pkg}
!=
{parent.resolve()}\
"""
return None
return Version.parse(pkg.version)
def get_version(package: Union[Path, str]) -> str:
"""Get the version of a package or module
Pass a module path or package name.
The former is recommended, since it also works for not yet installed packages.
Supports getting the version from
#. The directory name (as created by ``setup.py sdist``)
#. The output of ``git describe``
#. The package metadata of an installed package
(This is the only possibility when passing a name)
Args:
package: package name or module path (``…/module.py`` or ``…/module/__init__.py``)
"""
path = Path(package)
if not path.suffix and len(path.parts) == 1: # Is probably not a path
v = get_version_from_metadata(package)
if v:
return str(v)
if path.suffix != ".py":
msg = f"“package” is neither the name of an installed module nor the path to a .py file."
if path.suffix:
msg += f" Unknown file suffix {path.suffix}"
raise ValueError(msg)
if path.name == "__init__.py":
name = path.parent.name
parent = path.parent.parent
else:
name = path.with_suffix("").name
parent = path.parent
return str(
get_version_from_dirname(name, parent)
or get_version_from_git(parent)
or get_version_from_metadata(name, parent)
or "0.0.0"
)
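# Illustrative usage sketch (not part of the original module): per the docstring
# above, the intended pattern is to resolve the package version at import time
# from the package's __init__.py, e.g.
#
#     __version__ = get_version(__file__)
#     del get_version  # keep the package namespace clean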
def get_all_dependencies_version(display=True):
"""
Adapted from answer 2 in
https://stackoverflow.com/questions/40428931/package-for-listing-version-of-packages-used-in-a-jupyter-notebook
"""
import pkg_resources
from IPython.display import display
import pandas as pd
_package_name = "dynamo-release"
_package = pkg_resources.working_set.by_key[_package_name]
all_dependencies = [str(r).split(">")[0] for r in _package.requires()] # retrieve deps from setup.py
all_dependencies.sort(reverse=True)
all_dependencies.insert(0, "dynamo-release")
all_dependencies_list = []
for m in pkg_resources.working_set:
if m.project_name.lower() in all_dependencies:
all_dependencies_list.append([m.project_name, m.version])
df = | pd.DataFrame(all_dependencies_list[::-1], columns=["package", "version"]) | pandas.DataFrame |
"""
Functions for writing a directory for iModulonDB webpages
"""
import logging
import os
import re
from itertools import chain
from zipfile import ZipFile
import numpy as np
import pandas as pd
from matplotlib.colors import to_hex
from tqdm.notebook import tqdm
from pymodulon.plotting import _broken_line, _get_fit, _solid_line
##################
# User Functions #
##################
def imodulondb_compatibility(model, inplace=False, tfcomplex_to_gene=None):
"""
Checks for all issues and missing information prior to exporting to iModulonDB.
If inplace = True, modifies the model (not recommended for main model variables).
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
IcaData object to check
inplace: bool, optional
If true, modifies the model to prepare for export.
Not recommended for use with your main model variable.
tfcomplex_to_gene: dict, optional
dictionary pointing complex TRN entries to matching gene names in the gene
table (ex: {"FlhDC":"flhD"})
Returns
-------
table_issues: pd.DataFrame
Each row corresponds to an issue with one of the main class elements.
Columns:
* Table: which table or other variable the issue is in
* Missing Column: the column of the Table with the issue (not case
sensitive; capitalization is ignored).
* Solution: Unless "CRITICAL" is in this cell, the site behavior if the
issue remained is described here.
tf_issues: pd.DataFrame
Each row corresponds to a regulator that is used in the imodulon_table.
Columns:
* in_trn: whether the regulator is in the model.trn. Regulators not
in the TRN will be ignored in the site's histograms and gene tables.
* has_link: whether the regulator has a link in tf_links. If not, no
link to external regulator databases will be shown.
* has_gene: whether the regulator can be matched to a gene in the model.
If this is false, then there will be no regulator scatter plot on the
site. You can link TF complexes to one of their genes using the
tfcomplex_to_gene input.
missing_g_links: pd.Series
The genes on this list don't have links in the gene_links. Their gene pages
for these genes will not display links.
missing_DOIs: pd.Series
The samples listed here don't have DOIs in the sample_table. Clicking on their
associated bars in the activity plots will not link to relevant papers.
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
table_issues = pd.DataFrame(columns=["Table", "Missing Column", "Solution"])
# Check for X
if model.X is None:
table_issues = table_issues.append(
{
"Table": "X",
"Missing Column": "all",
"Solution": "CRITICAL. Add the expression matrix"
" so that gene pages can be generated.",
},
ignore_index=True,
)
logging.warning("Critical issue: No X matrix")
# Check for updated imodulondb table
default_imdb_table = {
"organism": "New Organism",
"dataset": "New Dataset",
"strain": "Unspecified",
"publication_name": "Unpublished Study",
"publication_link": "",
"gene_link_db": "External Database",
"organism_folder": "new_organism",
"dataset_folder": "new_dataset",
}
for k, v in default_imdb_table.items():
if model.imodulondb_table[k] == v:
if k == "publication_link":
solution = "The publication name will not be a hyperlink."
else:
solution = 'The default, "{}", will be used.'.format(v)
table_issues = table_issues.append(
{
"Table": "iModulonDB",
"Missing Column": k,
"Solution": solution,
},
ignore_index=True,
)
# Check the gene table
gene_table_cols = {
"gene_name": "Locus tags (gene_table.index) will be used.",
"gene_product": "Locus tags (gene_table.index) will be used.",
"cog": "COG info will not display & the gene scatter plot will"
" not have color.",
"start": "The x axis of the scatter plot will be a numerical"
" value instead of a genome location.",
"operon": "Operon info will not display.",
"regulator": "Regulator info will not display. If you have a"
" TRN, add it to the model to auto-generate this column.",
}
gene_table_lower = {i.lower(): i for i in model.gene_table.columns}
for col in gene_table_cols.keys():
if not (col in gene_table_lower.keys()):
table_issues = table_issues.append(
{
"Table": "Gene",
"Missing Column": col,
"Solution": gene_table_cols[col],
},
ignore_index=True,
)
if (col in ["gene_name", "gene_product"]) & inplace:
model.gene_table[col] = model.gene_table.index
elif inplace:
model.gene_table = model.gene_table.rename(
{gene_table_lower[col]: col}, axis=1
)
# check for missing gene links
missing_g_links = []
for g in model.M.index:
if (
not (isinstance(model.gene_links[g], str))
or model.gene_links[g].strip() == ""
):
missing_g_links.append(g)
missing_g_links = pd.Series(missing_g_links, name="missing_gene_links")
# check for errors in the n_replicates column of the sample table
if inplace & ("n_replicates" in model.sample_table.columns):
try:
imdb_activity_bar_df(model, model.imodulon_table.index[0])
except ValueError:
logging.warning(
"Error detected in sample_table['n_replicates']."
" Deleting that column. It will be auto-regenerated."
" You can prevent this from happening in the future"
" using generate_n_replicates_column(model)"
)
model.sample_table = model.sample_table.drop("n_replicates", 1)
# check the sample table
sample_table_cols = {
"project": "This is a CRITICAL column defining the largest"
" grouping of samples. Vertical bars in the activity plot"
" will separate projects.",
"condition": "This is an CRITICAL column defining the smallest"
" grouping of samples. Biological replicates must have matching"
" projects and conditions, and they will appear as single bars"
" with averaged activities.",
"sample": "The sample_table.index will be used. Each entry must be"
' unique. Note that the preferred syntax is "project__condition__#."',
"n_replicates": "This column will be generated for you.",
"doi": "Clicking on activity plot bars will not link to relevant"
" papers for the samples.",
}
sample_table_lower = {i.lower(): i for i in model.sample_table.columns}
if model.sample_table.columns.str.lower().duplicated().any():
logging.warning(
"Critical issue: Duplicated column names"
" (case insensitive) in sample_table"
)
table_issues = table_issues.append(
{
"Table": "Sample",
"Missing Column": "N/A - Duplicated Columns Exist",
"Solution": "Column names (case insensitive) should not "
"be duplicated. Pay special attention the 'sample' column.",
},
ignore_index=True,
)
for col in sample_table_cols.keys():
if not (col in sample_table_lower.keys()):
if (col == "sample") & (model.sample_table.index.name == "sample"):
continue
if col in ["project", "condition"]:
logging.warning(
"Critical issue: No {} column in sample_table.".format(col)
)
table_issues = table_issues.append(
{
"Table": "Sample",
"Missing Column": col,
"Solution": sample_table_cols[col],
},
ignore_index=True,
)
if (col == "n_replicates") & inplace:
generate_n_replicates_column(model)
elif inplace:
model.sample_table = model.sample_table.rename(
{sample_table_lower[col]: col}, axis=1
)
# check for missing DOIs
if "doi" in sample_table_lower.keys():
if inplace:
doi_idx = "doi"
else:
doi_idx = sample_table_lower["doi"]
missing_DOIs = model.sample_table.index[
model.sample_table[doi_idx].isna()
].copy()
missing_DOIs.name = "missing_DOIs"
else:
missing_DOIs = model.sample_table.index.copy()
missing_DOIs.name = "missing_DOIs"
# check the iModulon table columns
try:
model.imodulon_table.index.astype(int)
im_idx = "int"
except TypeError:
im_idx = "str"
iM_table_cols = {
"name": "imodulon_table.index will be used.",
"regulator": "The regulator details will be left blank.",
"function": "The function will be blank in the dataset table and"
' "Uncharacterized" in the iModulon dashboard',
"category": 'The categories will be filled in as "Uncharacterized".',
"n_genes": "This column will be computed for you.",
"precision": "This column will be left blank.",
"recall": "This column will be left blank.",
"exp_var": "This column will be left blank.",
}
iM_table_lower = {i.lower(): i for i in model.imodulon_table.columns}
for col in iM_table_cols.keys():
if not (col in iM_table_lower.keys()):
table_issues = table_issues.append(
{
"Table": "iModulon",
"Missing Column": col,
"Solution": iM_table_cols[col],
},
ignore_index=True,
)
if inplace:
if col == "name":
if im_idx == "int":
model.imodulon_table["name"] = [
"iModulon {}".format(i) for i in model.imodulon_table.index
]
else:
model.imodulon_table["name"] = model.imodulon_table.index
elif col == "n_genes":
model.imodulon_table["n_genes"] = model.M_binarized.sum().astype(
int
)
else:
model.imodulon_table[col] = np.nan
elif inplace:
model.imodulon_table = model.imodulon_table.rename(
{iM_table_lower[col]: col}, axis=1
)
if inplace:
if im_idx == "str":
model.rename_imodulons(
dict(zip(model.imodulon_names, range(len(model.imodulon_names))))
)
for idx, tf in zip(model.imodulon_table.index, model.imodulon_table.regulator):
try:
model.imodulon_table.loc[idx, "regulator_readable"] = (
model.imodulon_table.regulator[idx]
.replace("/", " or ")
.replace("+", " and ")
)
except AttributeError:
model.imodulon_table.loc[
idx, "regulator_readable"
] = model.imodulon_table.regulator[idx]
# check the TRN
cols = ["in_trn", "has_link", "has_gene"]
tf_issues = pd.DataFrame(columns=cols)
if "regulator" in iM_table_lower.keys():
if inplace:
reg_idx = "regulator"
else:
reg_idx = iM_table_lower["regulator"]
for tf_string in model.imodulon_table[reg_idx]:
_, no_trn = parse_tf_string(model, tf_string)
_, no_link = tf_with_links(model, tf_string)
_, no_gene = get_tfs_to_scatter(model, tf_string, tfcomplex_to_gene)
tfs_to_add = set(no_trn + no_link + no_gene)
for tf in tfs_to_add:
row = dict(zip(cols, [True] * 3))
for col, tf_set in zip(cols, [no_trn, no_link, no_gene]):
if tf in tf_set:
row[col] = False
tf_issues.loc[tf] = row
return table_issues, tf_issues, missing_g_links, missing_DOIs
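# Illustrative usage sketch (assumes `model` is an IcaData object you have
# already built or loaded; the tfcomplex_to_gene mapping shown is the example
# from the docstring above):
#
#     table_issues, tf_issues, missing_links, missing_dois = imodulondb_compatibility(
#         model, inplace=False, tfcomplex_to_gene={"FlhDC": "flhD"}
#     )
#     print(table_issues)  # resolve any "CRITICAL" issues before exporting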
def imodulondb_export(
model,
path=".",
cat_order=None,
tfcomplex_to_gene=None,
skip_iMs=False,
skip_genes=False,
):
"""
Generates the iModulonDB page for the data and exports to the path.
If certain columns are unavailable but can be filled in automatically,
they will be.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object to export
path : str, optional
Path to iModulonDB main hosting folder (default = ".")
cat_order : list, optional
List of categories in the imodulon_table, ordered as you would
like them to appear in the dataset table (default = None)
tfcomplex_to_gene : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
skip_iMs : bool, optional
If this is True, do not output iModulon files (to save time)
skip_genes : bool, optional
If this is True, do not output gene files (to save time)
Returns
-------
None: None
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
model1 = model.copy()
imodulondb_compatibility(model1, True, tfcomplex_to_gene=tfcomplex_to_gene)
print("Writing main site files...")
folder = imodulondb_main_site_files(model1, path, cat_order=cat_order)
print("Done writing main site files. Writing plot files...")
if not (skip_iMs and skip_genes):
print(
"Two progress bars will appear below. The second will take "
"significantly longer than the first."
)
if not (skip_iMs):
print("Writing iModulon page files (1/2)")
imdb_generate_im_files(model1, folder, "start", tfcomplex_to_gene)
if not (skip_genes):
print("Writing Gene page files (2/2)")
imdb_generate_gene_files(model1, folder)
print(
"Complete! (Organism = {}; Dataset = {})".format(
model1.imodulondb_table["organism_folder"],
model1.imodulondb_table["dataset_folder"],
)
)
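# Illustrative usage sketch (assumes `model` is an IcaData object; the path and
# category names below are placeholders, and cat_order entries must match
# values in model.imodulon_table.category):
#
#     imodulondb_export(
#         model,
#         path="./imodulondb_site",
#         cat_order=["Carbon Metabolism", "Stress Response", "Uncharacterized"],
#         tfcomplex_to_gene={"FlhDC": "flhD"},
#     )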
###############################
# Major Outputs (Called Once) #
###############################
def imdb_dataset_table(model):
"""
Converts the model's imodulondb_table into dataset metadata
for the gray box on the left side of the dataset page
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
An IcaData object
Returns
-------
res: ~pandas.Series
A series of formatted metadata
"""
res = pd.Series(dtype=str)
if model.imodulondb_table["organism"] == "New Organism":
org_short = ""
else:
org_parts = model.imodulondb_table["organism"].split(" ")
org_short = org_parts[0][0].upper() + ". " + org_parts[1].lower()
org_short = "<i>" + org_short + "</i>"
res["Title"] = org_short + " " + model.imodulondb_table["dataset"]
res["Organism"] = "<i>" + model.imodulondb_table["organism"] + "</i>"
res["Strain"] = model.imodulondb_table["strain"]
if model.imodulondb_table["publication_link"] == "":
res["Publication"] = model.imodulondb_table["publication_name"]
else:
pub_str = '<a href="' + model.imodulondb_table["publication_link"]
pub_str += '">' + model.imodulondb_table["publication_name"] + "</a>"
res["Publication"] = pub_str
res["Number of Samples"] = model.A.shape[1]
if ("project" in model.sample_table.columns) and (
"condition" in model.sample_table.columns
):
num_conds = len(model.sample_table.groupby(["condition", "project"]))
else:
num_conds = "Unknown"
res["Number of Unique Conditions"] = num_conds
res["Number of Genes"] = model.M.shape[0]
res["Number of iModulons"] = model.M.shape[1]
return res
def imdb_iM_table(imodulon_table, cat_order=None):
"""
Reformats the iModulon table according
Parameters
----------
imodulon_table : ~pandas.DataFrame
Table formatted similar to IcaData.imodulon_table
cat_order : list, optional
List of categories in imodulon_table.category, ordered as desired
Returns
-------
im_table: ~pandas.DataFrame
New iModulon table with the columns expected by iModulonDB
"""
im_table = imodulon_table[
[
"name",
"regulator_readable",
"function",
"category",
"n_genes",
"exp_var",
"precision",
"recall",
]
].copy()
im_table.index.name = "k"
im_table.category = im_table.category.fillna("Uncharacterized")
if cat_order is not None:
cat_dict = {val: i for i, val in enumerate(cat_order)}
im_table.loc[:, "category_num"] = [
cat_dict[im_table.category[k]] for k in im_table.index
]
else:
try:
im_table.loc[:, "category_num"] = imodulon_table["new_idx"]
except KeyError:
im_table.loc[:, "category_num"] = im_table.index
return im_table
def imdb_gene_presence(model):
"""
Generates the two versions of the gene presence file, one as a binary
matrix, and one as a DataFrame
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
An IcaData object
Returns
-------
mbin: ~pandas.DataFrame
Binarized M matrix
mbin_list: ~pandas.DataFrame
Table mapping genes to iModulons
"""
mbin = model.M_binarized.astype(bool)
mbin_list = pd.DataFrame(columns=["iModulon", "Gene"])
for k in mbin.columns:
for g in mbin.index[mbin[k]]:
mbin_list = mbin_list.append({"iModulon": k, "Gene": g}, ignore_index=True)
return mbin, mbin_list
def imodulondb_main_site_files(
model, path_prefix=".", rewrite_annotations=True, cat_order=None
):
"""
Generates all parts of the site that do not require large iteration loops
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Main folder for iModulonDB files (default = ".")
rewrite_annotations : bool, optional
Set to False if the gene_table and trn are unchanged (default = True)
cat_order : list, optional
list of categories in data.imodulon_table.category, ordered as you want
them to appear on the dataset page (default = None)
Returns
-------
main_folder: str
Dataset folder, for use as the path_prefix in imdb_generate_im_files()
"""
organism = model.imodulondb_table["organism_folder"]
dataset = model.imodulondb_table["dataset_folder"]
# create new folders
organism_folder = os.path.join(path_prefix, "organisms", organism)
if not (os.path.isdir(organism_folder)):
os.makedirs(organism_folder)
annot_folder = os.path.join(organism_folder, "annotation")
if not (os.path.isdir(annot_folder)):
rewrite_annotations = True
os.makedirs(annot_folder)
# save annotations
if rewrite_annotations:
# make the folder if necessary
gene_folder = os.path.join(annot_folder, "gene_files")
if not (os.path.isdir(gene_folder)):
os.makedirs(gene_folder)
# add files to the folder
model.gene_table.to_csv(os.path.join(gene_folder, "gene_info.csv"))
try:
model.trn.to_csv(os.path.join(gene_folder, "trn.csv"))
except FileNotFoundError:
pass
# zip the folder
old_cwd = os.getcwd()
os.chdir(gene_folder)
with ZipFile("../gene_files.zip", "w") as z:
z.write("gene_info.csv")
z.write("trn.csv")
os.chdir(old_cwd)
main_folder = os.path.join(organism_folder, dataset)
if not (os.path.isdir(main_folder)):
os.makedirs(main_folder)
# save the metadata files in the main folder
dataset_meta = imdb_dataset_table(model)
dataset_meta.to_csv(os.path.join(main_folder, "dataset_meta.csv"))
# num_ims - used so that the 'next iModulon' button doesn't overflow
file = open(main_folder + "/num_ims.txt", "w")
file.write(str(model.M.shape[1]))
file.close()
# save the dataset files in the data folder
data_folder = os.path.join(main_folder, "data_files")
if not (os.path.isdir(data_folder)):
os.makedirs(data_folder)
model.X.to_csv(os.path.join(data_folder, "log_tpm.csv"))
model.A.to_csv(os.path.join(data_folder, "A.csv"))
model.M.to_csv(os.path.join(data_folder, "M.csv"))
im_table = imdb_iM_table(model.imodulon_table, cat_order)
im_table.to_csv(os.path.join(data_folder, "iM_table.csv"))
model.sample_table.to_csv(os.path.join(data_folder, "sample_table.csv"))
mbin, mbin_list = imdb_gene_presence(model)
mbin.to_csv(os.path.join(data_folder, "gene_presence_matrix.csv"))
mbin_list.to_csv(os.path.join(data_folder, "gene_presence_list.csv"))
pd.Series(model.thresholds).to_csv(os.path.join(data_folder, "M_thresholds.csv"))
# zip the data folder
old_cwd = os.getcwd()
os.chdir(data_folder)
with ZipFile("../data_files.zip", "w") as z:
z.write("log_tpm.csv")
z.write("A.csv")
z.write("M.csv")
z.write("iM_table.csv")
z.write("sample_table.csv")
z.write("gene_presence_list.csv")
z.write("gene_presence_matrix.csv")
z.write("M_thresholds.csv")
os.chdir(old_cwd)
# make iModulons searchable
enrich_df = model.imodulon_table.copy()
enrich_df["component"] = enrich_df.index
enrich_df = enrich_df[["component", "name", "regulator", "function"]]
enrich_df = enrich_df.rename({"function": "Function"}, axis=1)
try:
enrich_df = enrich_df.sort_values(by="name").fillna(value="N/A")
except TypeError:
enrich_df["name"] = enrich_df["name"].astype(str)
enrich_df = enrich_df.sort_values(by="name").fillna(value="N/A")
if not (os.path.isdir(main_folder + "/iModulon_files")):
os.makedirs(main_folder + "/iModulon_files")
enrich_df.to_json(main_folder + "/iModulon_files/im_list.json", orient="records")
# make genes searchable
gene_df = model.gene_table.copy()
gene_df = gene_df[gene_df.index.isin(model.X.index)]
gene_df["gene_id"] = gene_df.index
gene_df = gene_df[["gene_name", "gene_id", "gene_product"]]
gene_df = gene_df.sort_values(by="gene_name").fillna(value="not available")
if not (os.path.isdir(main_folder + "/gene_page_files")):
os.makedirs(main_folder + "/gene_page_files")
gene_df.to_json(main_folder + "/gene_page_files/gene_list.json", orient="records")
# make the html
html = '<div class="panel">\n'
html += ' <div class="panel-header">\n'
html += ' <h2 class="mb-0">\n'
html += ' <button class="btn btn-link collapsed organism" type="button"'
html += ' data-toggle="collapse" data-target="#new_org" aria-expanded="false"'
html += ' aria-controls="new_org">\n <i>'
html += model.imodulondb_table["organism"]
html += "</i>\n </button>\n </h2>\n </div>\n"
html += ' <div id="new_org" class="collapse" aria-labelledby="headingThree"'
html += ' data-parent="#organismAccordion">\n'
html += ' <div class="panel-body">\n'
html += ' <ul class="nav navbar-dark flex-column">\n'
html += ' <li class="nav-item dataset">\n'
html += ' <a class="nav-link active" href="dataset.html?organism='
html += organism
html += "&dataset="
html += dataset
html += '"><i class="fas fa-angle-right pr-2"></i>'
html += model.imodulondb_table["dataset"]
html += "\n </a>\n </li>\n"
html += " </ul>\n </div>\n </div>\n</div>"
file = open(main_folder + "/html_for_splash.html", "w")
file.write(html)
file.close()
return main_folder
def imdb_generate_im_files(
model, path_prefix=".", gene_scatter_x="start", tfcomplex_to_gene=None
):
"""
Generates all files for all iModulons in data
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Dataset folder in which to store the files (default = ".")
gene_scatter_x : str
Column from the gene table that specificies what to use on the
X-axis of the gene scatter plot (default = "start")
tfcomplex_to_gene : dict, optional
dictionary pointing complex TRN entries
to matching gene names in the gene table
ex: {"FlhDC":"flhD"}
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
for k in tqdm(model.imodulon_table.index):
make_im_directory(model, k, path_prefix, gene_scatter_x, tfcomplex_to_gene)
def imdb_generate_gene_files(model, path_prefix="."):
"""
Generates gene page files for all genes in the IcaData object
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
path_prefix : str, optional
Dataset folder in which to store the files (default = ".")
Returns
-------
None
"""
for g in tqdm(model.M.index):
make_gene_directory(model, g, path_prefix)
###################################################
# iModulon-Related Outputs (and Helper Functions) #
###################################################
# Gene Table
def parse_tf_string(model, tf_str, verbose=False):
"""
Returns a list of relevant tfs from a string. Will ignore TFs not in the
trn file.
iModulonDB helper function.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
tf_str : str
String of tfs joined by '+' and '/' operators
verbose : bool, optional
Whether or nor to print outputs
Returns
-------
tfs: list
List of relevant TFs
"""
if not (type(tf_str) == str):
return [], []
if tf_str == "":
return [], []
tf_str = tf_str.replace("[", "").replace("]", "")
tfs = re.split("[+/]", tf_str)
# Check if there is an issue, just remove the issues for now.
bad_tfs = []
for tf in tfs:
tf = tf.strip()
if tf not in model.trn.regulator.unique():
if verbose:
print("Regulator not in TRN:", tf)
print(
"To remedy this, add rows to the TRN for each gene associated "
"with this regulator. Otherwise, it will be ignored in the gene"
"tables and histograms."
)
bad_tfs.append(tf)
tfs = [t.strip() for t in list(set(tfs) - set(bad_tfs))]
bad_tfs = list(set(bad_tfs))
return tfs, bad_tfs
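# Examples of the parsing behaviour above (hedged: actual output depends on
# which regulators are present in model.trn, and the order of the first list
# is not guaranteed because it passes through a set):
#
#     parse_tf_string(model, "fur+crp")    ->  (["fur", "crp"], [])   if both are in the TRN
#     parse_tf_string(model, "fur/absent") ->  (["fur"], ["absent"])  unknown TFs are dropped
#     parse_tf_string(model, np.nan)       ->  ([], [])               non-string input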
def imdb_gene_table_df(model, k):
"""
Creates the gene table dataframe for iModulonDB
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
Returns
-------
res: ~pandas.DataFrame
DataFrame of the gene table that is compatible with iModulonDB
"""
# get TFs and large table
row = model.imodulon_table.loc[k]
tfs, _ = parse_tf_string(model, row.regulator)
res = model.view_imodulon(k)
# sort
columns = []
for c in [
"gene_weight",
"gene_name",
"old_locus_tag",
"gene_product",
"cog",
"operon",
"regulator",
]:
if c in res.columns:
columns.append(c)
res = res[columns]
res = res.sort_values("gene_weight", ascending=False)
# add TFs
for tf in tfs:
reg_genes = model.trn.gene_id[model.trn.regulator == tf].values
res[tf] = [i in reg_genes for i in res.index]
# add links
res["link"] = [model.gene_links[g] for g in res.index]
# clean up
res.index.name = "locus"
return res
# Gene Histogram
def _component_DF(model, k, tfs=None):
"""
Helper function for imdb_gene_hist_df
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
tfs : list
List of TFs (default = None)
Returns
-------
gene_table: ~pandas.DataFrame
Gene table for the iModulon
"""
df = pd.DataFrame(model.M[k].sort_values())
df.columns = ["gene_weight"]
if "gene_product" in model.gene_table.columns:
df["gene_product"] = model.gene_table["gene_product"]
if "gene_name" in model.gene_table.columns:
df["gene_name"] = model.gene_table["gene_name"]
if "operon" in model.gene_table.columns:
df["operon"] = model.gene_table["operon"]
if "length" in model.gene_table.columns:
df["length"] = model.gene_table.length
if "regulator" in model.gene_table.columns:
df["regulator"] = model.gene_table.regulator.fillna("")
if tfs is not None:
for tf in tfs:
df[tf] = [tf in regs.split(",") for regs in df["regulator"]]
return df.sort_values("gene_weight")
def _tf_combo_string(row):
"""
Creates a formatted string for the histogram legends. Helper function for
imdb_gene_hist_df.
Parameters
----------
row : ~pandas.Series
Boolean series indexed by TFs for a given gene
Returns
-------
str
A string formatted for display (i.e. "Regulated by ...")
"""
if row.sum() == 0:
return "unreg"
if row.sum() == 1:
return row.index[row][0]
if row.sum() == 2:
return " and ".join(row.index[row])
else:
return ", ".join(row.index[row][:-1]) + ", and " + row.index[row][-1]
def _sort_tf_strings(tfs, unique_elts):
"""
Sorts TF strings for the legend of the histogram. Helper function for
imdb_gene_hist_df.
Parameters
----------
tfs : list[str]
Sequence of TFs in the desired order
unique_elts : list[str]
All combination strings made by _tf_combo_string
Returns
-------
list[str]
A sorted list of combination strings that have a consistent ordering
"""
# unreg always goes first
unique_elts.remove("unreg")
sorted_elts = ["unreg"]
# then the individual TFs
for tf in tfs:
if tf in unique_elts:
sorted_elts.append(tf)
unique_elts.remove(tf)
# then pairs
pairs = [i for i in unique_elts if "," not in i]
for i in tfs:
for j in tfs:
name = i + " and " + j
if name in pairs:
sorted_elts.append(name)
unique_elts.remove(name)
# then longer combos, which won't be sorted for now
return sorted_elts + unique_elts
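# Worked example of the ordering above (note that `unique_elts` is modified in
# place, and combinations of three or more TFs keep their incoming order):
#
#     _sort_tf_strings(["ArcA", "Fnr"], ["ArcA and Fnr", "unreg", "Fnr"])
#     # -> ["unreg", "Fnr", "ArcA and Fnr"]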
def imdb_gene_hist_df(model, k, bins=20, tol=0.001):
"""
Creates the gene histogram for an iModulon
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
bins : int
Number of bins in the histogram (default = 20)
tol : float
Distance to threshold for deciding if a bar is in the iModulon
(default = .001)
Returns
-------
gene_hist_table: ~pandas.DataFrame
A dataframe for producing the histogram that is compatible with
iModulonDB
"""
# get TFs
row = model.imodulon_table.loc[k]
if not (type(row.regulator) == str):
tfs = []
else:
tfs, _ = parse_tf_string(model, row.regulator)
tfs = list(set(tfs))
# get genes
DF_gene = _component_DF(model, k, tfs)
# add a tf_combo column
if len(tfs) == 0:
DF_gene["tf_combos"] = ["unreg"] * DF_gene.shape[0]
else:
tf_bools = DF_gene[tfs]
DF_gene["tf_combos"] = [
_tf_combo_string(tf_bools.loc[g]) for g in tf_bools.index
]
# get the list of tf combos in the correct order
tf_combo_order = _sort_tf_strings(tfs, list(DF_gene.tf_combos.unique()))
# compute bins
xmin = min(min(DF_gene.gene_weight), -model.thresholds[k])
xmax = max(max(DF_gene.gene_weight), model.thresholds[k])
width = (
2
* model.thresholds[k]
/ max((np.floor(2 * model.thresholds[k] * bins / (xmax - xmin) - 1)), 1)
)
xmin = -model.thresholds[k] - width * np.ceil((-model.thresholds[k] - xmin) / width)
xmax = xmin + width * bins
# column headers: bin middles
columns = np.arange(xmin + width / 2, xmax + width / 2, width)[:bins]
index = ["thresh"] + tf_combo_order + [i + "_genes" for i in tf_combo_order]
res = pd.DataFrame(index=index, columns=columns)
# row 0: threshold values and the number of unique tf combos
thresh1 = -model.thresholds[k]
thresh2 = model.thresholds[k]
num_combos = len(tf_combo_order)
res.loc["thresh"] = [thresh1, thresh2, num_combos] + [np.nan] * (len(columns) - 3)
# next set of rows: heights of bars
for r in tf_combo_order:
res.loc[r] = np.histogram(
DF_gene.gene_weight[DF_gene.tf_combos == r], bins, (xmin, xmax)
)[0]
# last set of rows: gene names
for b_mid in columns:
# get the bin bounds
b_lower = b_mid - width / 2
b_upper = b_lower + width
for r in tf_combo_order:
# get the genes for this regulator and bin
genes = DF_gene.index[
(DF_gene.tf_combos == r)
& (DF_gene.gene_weight < b_upper)
& (DF_gene.gene_weight > b_lower)
]
# use the gene names, and get them with num2name (more robust)
genes = [model.num2name(g) for g in genes]
res.loc[r, b_mid] = len(genes)
gene_list = np.array2string(np.array(genes), separator=" ")
# don't list unregulated genes unless they are in the i-modulon
if r == "unreg":
if (b_lower + tol >= model.thresholds[k]) or (
b_upper - tol <= -model.thresholds[k]
):
res.loc[r + "_genes", b_mid] = gene_list
else:
res.loc[r + "_genes", b_mid] = "[]"
else:
res.loc[r + "_genes", b_mid] = gene_list
return res
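# Sketch of the returned layout (values below are purely illustrative):
#
#                    -0.35    -0.25   ...    0.35    <- bin midpoints (columns)
#     thresh         -0.30     0.30    2.0    NaN    <- +/- threshold and number of combos
#     unreg              12        8   ...      1    <- bar heights per regulator combination
#     Fur                 3        0   ...      4
#     unreg_genes    "[...]"    "[]"   ...  "[...]"  <- stringified gene-name arrays per bin
#     Fur_genes      "[...]"    "[]"   ...  "[...]"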
# Gene Scatter Plot
def _gene_color_dict(model):
"""
Helper function to match genes to colors based on COG. Used by
imdb_gene_scatter_df.
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
Returns
-------
dict
Dictionary associating gene names to colors
"""
try:
gene_cogs = model.gene_table.cog.to_dict()
except AttributeError:
return {k: "dodgerblue" for k in model.gene_table.index}
try:
return {k: model.cog_colors[v] for k, v in gene_cogs.items()}
except (KeyError, AttributeError):
# previously, this would call the setter using:
# data.cog_colors = None
cogs = sorted(model.gene_table.cog.unique())
model.cog_colors = dict(
zip(
cogs,
[
"red",
"pink",
"y",
"orchid",
"mediumvioletred",
"green",
"lightgray",
"lightgreen",
"slategray",
"blue",
"saddlebrown",
"turquoise",
"lightskyblue",
"c",
"skyblue",
"lightblue",
"fuchsia",
"dodgerblue",
"lime",
"sandybrown",
"black",
"goldenrod",
"chocolate",
"orange",
],
)
)
return {k: model.cog_colors[v] for k, v in gene_cogs.items()}
def imdb_gene_scatter_df(model, k, gene_scatter_x="start"):
"""
Generates a dataframe for the gene scatter plot in iModulonDB
Parameters
----------
model : :class:`~pymodulon.core.IcaData`
IcaData object
k : int or str
iModulon name
gene_scatter_x : str
Determines x-axis of the scatterplot
Returns
-------
res: ~pandas.DataFrame
A dataframe for producing the scatterplot
"""
columns = ["name", "x", "y", "cog", "color", "link"]
res = pd.DataFrame(columns=columns, index=model.M.index)
res.index.name = "locus"
cutoff = model.thresholds[k]
# x&y scatterplot points - do alternatives later
if gene_scatter_x == "start":
try:
res.x = model.gene_table.loc[res.index, "start"]
except KeyError:
gene_scatter_x = "gene number"
res.x = range(len(res.index))
else:
raise ValueError("Only 'start' is supported as a gene_scatter_x input.")
# res.x = data.X[base_conds].mean(axis=1)
res.y = model.M[k]
# add other data
res.name = [model.num2name(i) for i in res.index]
try:
res.cog = model.gene_table.cog[res.index]
except AttributeError:
res.cog = "Unknown"
gene_colors = _gene_color_dict(model)
res.color = [to_hex(gene_colors[gene]) for gene in res.index]
# if the gene is in the iModulon, it is clickable
in_im = res.index[res.y.abs() > cutoff]
for g in in_im:
res.loc[g, "link"] = model.gene_links[g]
# add a row to store the threshold
cutoff_row = pd.DataFrame(
[gene_scatter_x, cutoff] + [np.nan] * 4, columns=["meta"], index=columns
).T
res = | pd.concat([cutoff_row, res]) | pandas.concat |
""" breaks down by-cell variants table to by-sample
also condensing ROIs to genes """
import pandas as pd
def driver():
""" loops through variant_df, matches cells to samples, and fills in
samples_x_gene with read count values """
for i in range(0,len(variant_df.index)):# looping over by-cell df
currCell = variant_df['cells'].iloc[i]
keep = meta['cell_id'] == currCell
meta_row = meta[keep]
try:
currSample = list(meta_row['sample_name'])[0]
except IndexError: # cells not in metadata?
currSample = 'NOT_FOUND'
for j in range(1,len(variant_df.columns)): # starting at 1 bc i dont want the cellnames
ROI = variant_df.columns[j]
currGene = ROI.split('_')[0]
samples_x_gene_sub = samples_x_gene.where(samples_x_gene['gene'] == currGene) # bottleneck
gene_index = samples_x_gene_sub.index[ | pd.notna(samples_x_gene_sub['gene']) | pandas.notna |
from __future__ import print_function
import unittest
from unittest import mock
from io import BytesIO, StringIO
import random
import six
import os
import re
import logging
import numpy as np
import pandas as pd
from . import utils as test_utils
import dataprofiler as dp
from dataprofiler.profilers.profile_builder import StructuredColProfiler, \
UnstructuredProfiler, UnstructuredCompiler, StructuredProfiler, Profiler
from dataprofiler.profilers.profiler_options import ProfilerOptions, \
StructuredOptions, UnstructuredOptions
from dataprofiler.profilers.column_profile_compilers import \
ColumnPrimitiveTypeProfileCompiler, ColumnStatsProfileCompiler, \
ColumnDataLabelerCompiler
from dataprofiler import StructuredDataLabeler, UnstructuredDataLabeler
from dataprofiler.profilers.helpers.report_helpers import _prepare_report
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def setup_save_mock_open(mock_open):
mock_file = BytesIO()
mock_file.close = lambda: None
mock_open.side_effect = lambda *args: mock_file
return mock_file
class TestStructuredProfiler(unittest.TestCase):
@classmethod
def setUp(cls):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(seed=0)
cls.input_file_path = os.path.join(
test_root_path, 'data', 'csv/aws_honeypot_marx_geo.csv'
)
cls.aws_dataset = pd.read_csv(cls.input_file_path)
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
with test_utils.mock_timeit():
cls.trained_schema = dp.StructuredProfiler(
cls.aws_dataset, len(cls.aws_dataset), options=profiler_options)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_bad_input_data(self, *mocks):
allowed_data_types = (r"\(<class 'list'>, "
r"<class 'pandas.core.series.Series'>, "
r"<class 'pandas.core.frame.DataFrame'>\)")
bad_data_types = [1, {}, np.inf, 'sdfs']
for data in bad_data_types:
with self.assertRaisesRegex(TypeError,
r"Data must either be imported using "
r"the data_readers or using one of the "
r"following: " + allowed_data_types):
StructuredProfiler(data)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_list_data(self, *mocks):
data = [[1, 1],
[None, None],
[3, 3],
[4, 4],
[5, 5],
[None, None],
[1, 1]]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual("<class 'list'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0, 1], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# validates that the sampled output maintains the same visual data format
# as the input.
self.assertListEqual(['5', '1', '1', '3', '4'],
profiler.profile[0].sample)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_pandas_series_data(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# test properties when series has name
data.name = 'test'
profiler = dp.StructuredProfiler(data)
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual(['test'], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._merge_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
def test_add_profilers(self, *mocks):
data = pd.DataFrame([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data[:2])
profile2 = dp.StructuredProfiler(data[2:])
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`StructuredProfiler` and `int` are '
'not of the same profiler type.'):
profile1 + 3
# test mismatched profiles
profile2._profile.pop(0)
profile2._col_name_to_idx.pop(0)
with self.assertRaisesRegex(ValueError,
"Cannot merge empty profiles."):
profile1 + profile2
# test mismatched profiles due to options
profile2._profile.append(None)
profile2._col_name_to_idx[0] = [0]
with self.assertRaisesRegex(ValueError,
'The two profilers were not setup with the '
'same options, hence they do not calculate '
'the same profiles and cannot be added '
'together.'):
profile1 + profile2
# test success
profile1._profile = [1]
profile1._col_name_to_idx = {"test": [0]}
profile2._profile = [2]
profile2._col_name_to_idx = {"test": [0]}
merged_profile = profile1 + profile2
self.assertEqual(3, merged_profile._profile[
merged_profile._col_name_to_idx["test"][0]])
self.assertIsNone(merged_profile.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", merged_profile.file_type)
self.assertEqual(2, merged_profile.row_has_null_count)
self.assertEqual(2, merged_profile.row_is_null_count)
self.assertEqual(7, merged_profile.total_samples)
self.assertEqual(5, len(merged_profile.hashed_row_dict))
self.assertDictEqual({'row_stats': 2}, merged_profile.times)
# test success if drawn from multiple files
profile2.encoding = 'test'
profile2.file_type = 'test'
merged_profile = profile1 + profile2
self.assertEqual('multiple files', merged_profile.encoding)
self.assertEqual('multiple files', merged_profile.file_type)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._get_correlation')
def test_stream_profilers(self, *mocks):
mocks[0].return_value = None
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None],
[None, 5.0],
[None, 5.0],
[None, None],
['test3', 7.0]])
# check prior to update
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data[:3])
self.assertEqual(1, profiler.row_has_null_count)
self.assertEqual(0, profiler.row_is_null_count)
self.assertEqual(3, profiler.total_samples)
self.assertEqual(2, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# check after update
with test_utils.mock_timeit():
profiler.update_profile(data[3:])
self.assertIsNone(profiler.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", profiler.file_type)
self.assertEqual(5, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(8, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2}, profiler.times)
def test_correct_unique_row_ratio_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(1.0, self.trained_schema._get_unique_row_ratio())
def test_correct_rows_ingested(self):
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_null_row_ratio_test(self):
self.assertEqual(2999, self.trained_schema.row_has_null_count)
self.assertEqual(1.0, self.trained_schema._get_row_has_null_ratio())
self.assertEqual(0, self.trained_schema.row_is_null_count)
self.assertEqual(0, self.trained_schema._get_row_is_null_ratio())
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_duplicate_row_count_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(0.0, self.trained_schema._get_duplicate_row_count())
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_correlation(self, *mock):
# Use the following formula to obtain the pairwise correlation:
# sum((x - np.mean(x)) * (y - np.mean(y))) /
# (np.sqrt(sum((x - np.mean(x))**2)) * np.sqrt(sum((y - np.mean(y))**2)))
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# data with a sole numeric column
data = pd.DataFrame([1.0, 8.0, 1.0, -2.0, 5.0])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1.0]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1}, profiler.times)
# data with one column containing some non-numeric (None) values
data = pd.DataFrame([1.0, None, 1.0, None, 5.0])
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with two columns, but one is numerical
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None]])
profiler = dp.StructuredProfiler(data, options=profile_options)
# Even the correlation with itself is NaN because the variance is zero
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, np.nan, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, np.nan, np.nan]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, -0.28527657, 0.18626508],
[-0.28527657, 1, -0.52996792],
[0.18626508, -0.52996792, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values in only one
# column
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, 0.03673504, 0.22844891],
[0.03673504, 1, -0.49072329],
[0.22844891, -0.49072329, 1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with only one numerical columns without nan values
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with no numeric columns
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3']})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with only one numeric column among non-numeric columns
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3'],
'c': [1, 2, 3]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows
data = pd.DataFrame({'a': [None, 2, 1, np.nan, 5, np.nan, 4, 10, 7, np.nan],
'b': [np.nan, 11, 1, 'nan', 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, np.nan, 6, 8, 1, None]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, np.nan, 2],
'b': [10, 11, 1, 4, 2, 5, np.nan, 3, np.nan, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, np.nan, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [*38/7*, *38/7*, 1, 7, 5, 9, 4, 10, 2],
# [10, 11, 1, 4, 2, 5, *11/2*, 3, 8],
# [1, 5, 3, 5, *4*, 2, 6, 8, 2]
expected_corr_mat = np.array([
[1, -0.03283837, 0.40038038],
[-0.03283837, 1, -0.30346637],
[0.40038038, -0.30346637, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_correlation(self, *mocks):
# Use the following formula to obtain the pairwise correlation
# sum((x - np.mean(x)) * (y - np.mean(y))) /
# (np.sqrt(sum((x - np.mean(x))**2)) * np.sqrt(sum((y - np.mean(y))**2)))
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# merge between two existing correlations
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data1, options=profile_options)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2},
merged_profile.times)
# merge between an existing corr and None correlation (without data)
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(None, options=profile_options)
profile2 = dp.StructuredProfiler(data, options=profile_options)
# TODO: remove the mock below when merge profile is updated
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1},
merged_profile.times)
# Merge between existing data and empty data that still has samples
data = pd.DataFrame({'a': [1, 2, 4, np.nan, None, np.nan],
'b': [5, 7, 1, np.nan, np.nan, 'nan']})
data1 = data[:3]
data2 = data[3:]
profile1 = dp.StructuredProfiler(data1, options=profile_options)
expected_corr_mat = np.array([
[1, -0.78571429],
[-0.78571429, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profile1.correlation_matrix)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
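# data2 holds only null rows, so profile2 contributes no correlation and
# the merged matrix should equal profile1's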
merged_profile = profile1 + profile2
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
def test_correlation_update(self):
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# Test with all numeric columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.4907239],
[0.26594894270403086, -0.4907239, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2}, profiler.times)
# Test when there's a non-numeric column
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, np.nan],
[-0.26559388521279237, 1.0, np.nan],
[np.nan, np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with multiple numerical and non-numeric columns, with nan values in only one column
# NaNs imputed to (9+4+10)/3
data = pd.DataFrame({'a': [7, 2, 1, 7, 5, 9, 4, 10, np.nan, np.nan],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
'd': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[ 1, 0.04721482, np.nan, -0.09383408],
[ 0.04721482, 1, np.nan,-0.49072329],
[np.nan, np.nan, np.nan, np.nan],
[-0.09383408, -0.49072329, np.nan, 1]]
)
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows, all null rows are dropped
data = pd.DataFrame({'a': [np.nan, 2, 1, None, 5, np.nan, 4, 10, 7, 'NaN'],
'b': [np.nan, 11, 1, np.nan, 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, None, 6, 8, 1, np.nan]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, 'nan', 2],
'b': [10, 11, 1, 4, 2, 5, 'NaN', 3, None, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, None, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [*13/3*, *13/3*, 1, 7, 5]
# [10, 11, 1, 4, 2]
# [1, 5, 3, 5, *7/2*]
# then updated with correlation (9th row dropped) between
# [9, 4, 10, 2],
# [5, *16/3*, 3, 8],
# [2, 6, 8, 2]
expected_corr_mat = np.array([
[1, -0.16079606, 0.43658332],
[-0.16079606, 1, -0.2801748],
[0.43658332, -0.2801748, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_chi2(self, *mocks):
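# chi2_matrix holds the p-values of the pairwise chi-squared tests of
# independence between categorical columns (diagonal entries are 1)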
# Empty
data = pd.DataFrame([])
profiler = dp.StructuredProfiler(data)
self.assertIsNone(profiler.chi2_matrix)
# Single column
data = pd.DataFrame({'a': ["y", "y", "n", "n", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([1])
self.assertEqual(expected_mat, profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_chi2(self, *mocks):
# Merge empty data
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler1 = dp.StructuredProfiler(None)
profiler2 = dp.StructuredProfiler(data)
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
profiler3 = profiler1 + profiler2
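# profiler1 was built without data, so the merged chi2 matrix should match
# profiler2's alone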
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_update_chi2(self, *mocks):
# Update with empty data
data1 = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data2 = pd.DataFrame({'a': [],
'b': [],
'c': []})
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
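# data2 is empty, so the update should leave the chi2 matrix unchanged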
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
def test_correct_datatime_schema_test(self):
profile_idx = self.trained_schema._col_name_to_idx["datetime"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = \
profile.profiles['data_type_profile']._profiles["datetime"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(2, profile.null_count)
six.assertCountEqual(self, ['nan'], profile.null_types)
self.assertEqual(['%m/%d/%y %H:%M'], col_schema_info['date_formats'])
def test_correct_integer_column_detection_src(self):
profile_idx = self.trained_schema._col_name_to_idx["src"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(3, profile.null_count)
def test_correct_integer_column_detection_int_col(self):
profile_idx = self.trained_schema._col_name_to_idx["int_col"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(0, profile.null_count)
def test_correct_integer_column_detection_port(self):
profile_idx = self.trained_schema._col_name_to_idx["srcport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_correct_integer_column_detection_destport(self):
profile_idx = self.trained_schema._col_name_to_idx["destport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_report(self):
report = self.trained_schema.report()
self.assertListEqual(list(report.keys()), [
'global_stats', 'data_stats'])
self.assertListEqual(
list(report['global_stats']),
[
"samples_used", "column_count", "row_count",
"row_has_null_ratio", 'row_is_null_ratio',
"unique_row_ratio", "duplicate_row_count", "file_type",
"encoding", "correlation_matrix", "chi2_matrix", "profile_schema", "times"
]
)
flat_report = self.trained_schema.report(
report_options={"output_format": "flat"})
self.assertEqual(test_utils.get_depth(flat_report), 1)
with mock.patch('dataprofiler.profilers.helpers.report_helpers'
'._prepare_report') as pr_mock:
self.trained_schema.report(
report_options={"output_format": 'pretty'})
# Once for global_stats, once for each of 16 columns
self.assertEqual(pr_mock.call_count, 17)
def test_report_schema_and_data_stats_match_order(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report()
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
expected_schema = {"a": [0, 2], "b": [1, 3], "c": [4], "d": [5]}
self.assertDictEqual(expected_schema, schema)
# Check that the column order in the report matches the column order
# In the schema (and in the data)
for name in schema:
for idx in schema[name]:
# Use min of column to validate column order amongst duplicates
col_min = data.iloc[0, idx]
self.assertEqual(name, data_stats[idx]["column_name"])
self.assertEqual(col_min, data_stats[idx]["statistics"]["min"])
def test_pretty_report_doesnt_cast_schema(self):
report = self.trained_schema.report(
report_options={"output_format": "pretty"})
# Want to ensure the values of this dict are of type list[int]
# Since pretty "prettifies" lists into strings with ... to shorten
expected_schema = {"datetime": [0], "host": [1], "src": [2],
"proto": [3], "type": [4], "srcport": [5],
"destport": [6], "srcip": [7], "locale": [8],
"localeabbr": [9], "postalcode": [10],
"latitude": [11], "longitude": [12], "owner": [13],
"comment": [14], "int_col": [15]}
self.assertDictEqual(expected_schema,
report["global_stats"]["profile_schema"])
def test_omit_keys_with_duplicate_cols(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={
"omit_keys": ["data_stats.a.statistics.min",
"data_stats.d.statistics.max",
"data_stats.*.statistics.null_types_index"]})
# Correctness of schema asserted in prior test
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
for idx in range(len(report["data_stats"])):
# Assert that min is absent from a's data_stats and not the others
if idx in schema["a"]:
self.assertNotIn("min", data_stats[idx]["statistics"])
else:
self.assertIn("min", report["data_stats"][idx]["statistics"])
# Assert that max is absent from d's data_stats and not the others
if idx in schema["d"]:
self.assertNotIn("max", report["data_stats"][idx]["statistics"])
else:
self.assertIn("max", report["data_stats"][idx]["statistics"])
# Assert that null_types_index not present in any
self.assertNotIn("null_types_index",
report["data_stats"][idx]["statistics"])
def test_omit_cols_preserves_schema(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
omit_cols = ["a", "d"]
omit_idxs = [0, 2, 5]
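# 'a' occupies column indices 0 and 2; 'd' is index 5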
omit_keys = [f"data_stats.{col}" for col in omit_cols]
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={"omit_keys": omit_keys})
for idx in range(len(report["data_stats"])):
if idx in omit_idxs:
self.assertIsNone(report["data_stats"][idx])
else:
self.assertIsNotNone(report["data_stats"][idx])
# This will keep the data_stats key but remove all columns
report = profiler.report(report_options={"omit_keys": ["data_stats.*"]})
for col_report in report["data_stats"]:
self.assertIsNone(col_report)
def test_report_quantiles(self):
report_none = self.trained_schema.report(
report_options={"num_quantile_groups": None})
report = self.trained_schema.report()
self.assertEqual(report_none, report)
for col in report["data_stats"]:
if col["column_name"] == "int_col":
report_quantiles = col["statistics"]["quantiles"]
break
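# the default number of quantile groups (4) yields 3 boundary values, i.e. the quartiles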
self.assertEqual(len(report_quantiles), 3)
report2 = self.trained_schema.report(
report_options={"num_quantile_groups": 1000})
for col in report2["data_stats"]:
if col["column_name"] == "int_col":
report2_1000_quant = col["statistics"]["quantiles"]
break
self.assertEqual(len(report2_1000_quant), 999)
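# the default quartiles should line up with the 250th/500th/750th boundaries
# of the 1000-group quantiles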
self.assertEqual(report_quantiles, {
0: report2_1000_quant[249],
1: report2_1000_quant[499],
2: report2_1000_quant[749],
})
def test_report_omit_keys(self):
# Omit both report keys manually
no_report_keys = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats']})
self.assertCountEqual({}, no_report_keys)
# Omit just data_stats
no_data_stats = self.trained_schema.report(
report_options={"omit_keys": ['data_stats']})
self.assertCountEqual({"global_stats"}, no_data_stats)
# Omit a global stat
no_samples_used = self.trained_schema.report(
report_options={"omit_keys": ['global_stats.samples_used']})
self.assertNotIn("samples_used", no_samples_used["global_stats"])
# Omit all keys
nothing = self.trained_schema.report(
report_options={"omit_keys": ['*']})
self.assertCountEqual({}, nothing)
# Omit every data_stats column
empty_data_stats_cols = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats.*']})
# data_stats key still present, but all columns are None
self.assertCountEqual({"data_stats"}, empty_data_stats_cols)
self.assertTrue(all([rep is None
for rep in empty_data_stats_cols["data_stats"]]))
# Omit specific data_stats column
no_datetime = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.datetime']})
self.assertNotIn("datetime", no_datetime["data_stats"])
# Omit a statistic from each column
no_sum = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.*.statistics.sum']})
self.assertTrue(all(["sum" not in rep["statistics"]
for rep in no_sum["data_stats"]]))
def test_report_compact(self):
report = self.trained_schema.report(
report_options={ "output_format": "pretty" })
omit_keys = [
"data_stats.*.statistics.times",
"data_stats.*.statistics.avg_predictions",
"data_stats.*.statistics.data_label_representation",
"data_stats.*.statistics.null_types_index",
"data_stats.*.statistics.histogram"
]
report = _prepare_report(report, 'pretty', omit_keys)
report_compact = self.trained_schema.report(
report_options={"output_format": "compact"})
self.assertEqual(report, report_compact)
def test_profile_key_name_without_space(self):
def recursive_test_helper(report, prev_key=None):
for key in report:
# do not test keys in 'data_stats' as they contain column names,
# nor in 'avg_predictions' and 'data_label_representation'
# as they contain label names;
# the same applies to 'null_types_index'
if prev_key not in ['data_stats', 'avg_predictions',
'data_label_representation',
'null_types_index', 'categorical_count']:
# key names should contain only alphanumeric letters or '_'
self.assertIsNotNone(re.match('^[a-zA-Z0-9_]+$', str(key)))
if isinstance(report[key], dict):
recursive_test_helper(report[key], key)
_report = self.trained_schema.report()
recursive_test_helper(_report)
def test_data_label_assigned(self):
# only use 5 samples
trained_schema = dp.StructuredProfiler(self.aws_dataset, samples_per_update=5)
report = trained_schema.report()
has_non_null_column = False
for i in range(len(report['data_stats'])):
# only test non-null columns
if report['data_stats'][i]['data_type'] is not None:
self.assertIsNotNone(report['data_stats'][i]['data_label'])
has_non_null_column = True
if not has_non_null_column:
self.fail(
"Dataset tested did not have a non-null column and therefore "
"could not validate the test.")
def test_text_data_raises_error(self):
text_file_path = os.path.join(
test_root_path, 'data', 'txt/sentence-10x.txt'
)
with self.assertRaisesRegex(TypeError, 'Cannot provide TextData object'
' to StructuredProfiler'):
profiler = dp.StructuredProfiler(dp.Data(text_file_path))
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_row_statistics')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredColProfiler')
def test_sample_size_warning_in_the_profiler(self, *mocks):
# structure data profile mock
sdp_mock = mock.Mock()
sdp_mock.clean_data_and_get_base_stats.return_value = (None, None)
mocks[0].return_value = sdp_mock
data = pd.DataFrame([1, None, 3, 4, 5, None])
with self.assertWarnsRegex(UserWarning,
"The data will be profiled with a sample "
"size of 3. All statistics will be based on "
"this subsample and not the whole dataset."):
profile1 = dp.StructuredProfiler(data, samples_per_update=3)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_min_col_samples_used(self, *mocks):
# No cols sampled since no cols to sample
empty_df = pd.DataFrame([])
empty_profile = dp.StructuredProfiler(empty_df)
self.assertEqual(0, empty_profile._min_col_samples_used)
# Every column fully sampled
full_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
full_profile = dp.StructuredProfiler(full_df)
self.assertEqual(3, full_profile._min_col_samples_used)
# First col sampled only twice, so that is min
sparse_df = pd.DataFrame([[1, None, None],
[1, 1, None],
[1, None, 1]])
sparse_profile = dp.StructuredProfiler(sparse_df, min_true_samples=2,
samples_per_update=1)
self.assertEqual(2, sparse_profile._min_col_samples_used)
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_profile_from_chunk')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_min_true_samples(self, *mocks):
empty_df = pd.DataFrame([])
# Test invalid input
msg = "`min_true_samples` must be an integer or `None`."
with self.assertRaisesRegex(ValueError, msg):
profile = dp.StructuredProfiler(empty_df, min_true_samples="Bloop")
# Test invalid input given to update_profile
profile = dp.StructuredProfiler(empty_df)
with self.assertRaisesRegex(ValueError, msg):
profile.update_profile(empty_df, min_true_samples="Bloop")
# Test None input (equivalent to zero)
profile = dp.StructuredProfiler(empty_df, min_true_samples=None)
self.assertEqual(None, profile._min_true_samples)
# Test valid input
profile = dp.StructuredProfiler(empty_df, min_true_samples=10)
self.assertEqual(10, profile._min_true_samples)
def test_save_and_load(self):
datapth = "dataprofiler/tests/data/"
test_files = ["csv/guns.csv", "csv/iris.csv"]
for test_file in test_files:
# Create Data and StructuredProfiler objects
data = dp.Data(os.path.join(datapth, test_file))
options = ProfilerOptions()
options.set({"correlation.is_enabled": True})
save_profile = dp.StructuredProfiler(data)
# store the expected data_labeler
data_labeler = save_profile.options.data_labeler.data_labeler_object
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler', return_value=data_labeler):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# validate loaded profile has same data labeler class
self.assertIsInstance(
load_profile.options.data_labeler.data_labeler_object,
data_labeler.__class__)
# only checks first columns
# get first column
first_column_profile = load_profile.profile[0]
self.assertIsInstance(
first_column_profile.profiles['data_label_profile']
._profiles['data_labeler'].data_labeler,
data_labeler.__class__)
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
np.testing.assert_equal(save_report, load_report)
def test_save_and_load_no_labeler(self):
# Create Data and UnstructuredProfiler objects
data = pd.DataFrame([1, 2, 3], columns=["a"])
profile_options = dp.ProfilerOptions()
profile_options.set({"data_labeler.is_enabled": False})
save_profile = dp.StructuredProfiler(data, options=profile_options)
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler'):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
self.assertDictEqual(save_report, load_report)
# validate both are still usable after
save_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
load_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_string_index_doesnt_cause_error(self, *mocks):
dp.StructuredProfiler(pd.DataFrame([[1, 2, 3]], index=["hello"]))
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_dict_in_data_no_error(self, *mocks):
# validates that _update_row_statistics does not error when trying to
# hash a dict.
profiler = dp.StructuredProfiler(pd.DataFrame([[{'test': 1}], [None]]))
self.assertEqual(1, profiler.row_is_null_count)
self.assertEqual(2, profiler.total_samples)
def test_duplicate_columns(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler = dp.StructuredProfiler(data)
# Ensure columns are correctly allocated to profiles in list
expected_mapping = {"a": [0, 2], "b": [1, 3], "c": [4], "d": [5]}
self.assertDictEqual(expected_mapping, profiler._col_name_to_idx)
for col in profiler._col_name_to_idx:
for idx in profiler._col_name_to_idx[col]:
# Make sure every index that a column name maps to represents
# A profile for that named column
self.assertEqual(col, profiler._profile[idx].name)
# Check a few stats to ensure calculation with data occurred
# Initialization ensures column ids and profile ids are identical
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
col_sum = col_min + col_max
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
# Check that update works as expected
new_data = pd.DataFrame([[100, 200, 300, 400, 500, 600]],
columns=["a", "b", "a", "b", "c", "d"])
profiler.update_profile(new_data)
self.assertDictEqual(expected_mapping, profiler._col_name_to_idx)
for col in profiler._col_name_to_idx:
for idx in profiler._col_name_to_idx[col]:
# Make sure every index that a column name maps to represents
# A profile for that named column
self.assertEqual(col, profiler._profile[idx].name)
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = new_data.iloc[0, col_idx]
col_sum = col_min + col_max + data.iloc[1, col_idx]
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
def test_unique_col_permutation(self, *mocks):
data = pd.DataFrame([[1, 2, 3, 4],
[5, 6, 7, 8]],
columns=["a", "b", "c", "d"])
perm_data = pd.DataFrame([[4, 3, 2, 1],
[8, 7, 6, 5]],
columns=["d", "c", "b", "a"])
# Test via add
first_profiler = dp.StructuredProfiler(data)
perm_profiler = dp.StructuredProfiler(perm_data)
profiler = first_profiler + perm_profiler
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
# Sum is doubled since it was updated with the same vals
col_sum = 2 * (col_min + col_max)
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
# Test via update
profiler = dp.StructuredProfiler(data)
profiler.update_profile(perm_data)
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
# Sum is doubled since it was updated with the same vals
col_sum = 2 * (col_min + col_max)
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
def test_get_and_validate_schema_mapping(self):
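# the mapping goes from column indices in the first schema to the matching
# indices (by column name) in the second schema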
unique_schema_1 = {"a": [0], "b": [1], "c": [2]}
unique_schema_2 = {"a": [2], "b": [0], "c": [1]}
unique_schema_3 = {"a": [0], "b": [1], "d": [2]}
msg = "Columns do not match, cannot update or merge profiles."
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
unique_schema_1,unique_schema_3)
expected_schema = {0: 0, 1: 1, 2: 2}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(unique_schema_1, {})
self.assertDictEqual(actual_schema, expected_schema)
expected_schema = {0: 2, 1: 0, 2: 1}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(unique_schema_1, unique_schema_2)
self.assertDictEqual(actual_schema, expected_schema)
dupe_schema_1 = {"a": [0], "b": [1, 2], "c": [3, 4, 5]}
dupe_schema_2 = {"a": [0], "b": [1, 3], "c": [2, 4, 5]}
dupe_schema_3 = {"a": [0, 1], "b": [2, 3, 4], "c": [5]}
four_col_schema = {"a": [0], "b": [1, 2], "c": [3, 4, 5], "d": [6]}
msg = ("Different number of columns detected for "
"'a', cannot update or merge profiles.")
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, dupe_schema_3)
msg = ("Different column indices under "
"duplicate name 'b', cannot update "
"or merge unless schema is identical.")
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, dupe_schema_2)
msg = "Attempted to merge profiles with different numbers of columns"
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, four_col_schema)
expected_schema = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(dupe_schema_1, dupe_schema_1)
self.assertDictEqual(actual_schema, expected_schema)
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
def test_diff(self, *mocks):
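# mock.patch decorators apply bottom-up, so mocks[0] is the
# ColumnDataLabelerCompiler.diff patch, mocks[1] the ColumnStatsProfileCompiler.diff
# patch, and mocks[2] the ColumnPrimitiveTypeProfileCompiler.diff patch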
# Data labeler compiler diff
mocks[0].return_value = {
'statistics': {
'avg_predictions': {
'a': 'unchanged'
},
'label_representation': {
'a': 'unchanged'
}
},
'data_label': [[], ['a'], []]
}
# stats compiler diff
mocks[1].return_value = {
'order': ['ascending', 'descending'],
'categorical': 'unchanged',
'statistics': {
'all_compiler_stats': 'unchanged'
}
}
# primitive stats compiler diff
mocks[2].return_value = {
'data_type_representation': {
'all_data_types': 'unchanged'
},
'data_type': 'unchanged',
'statistics': {
'numerical_statistics_here': "unchanged"
}
}
data1 = pd.DataFrame([[1, 2], [5, 6]], columns=["a", "b"])
data2 = pd.DataFrame([[4, 3], [8, 7], [None, None], [9, 10]],
columns=["a", "b"])
options = dp.ProfilerOptions()
options.structured_options.correlation.is_enabled = True
profile1 = dp.StructuredProfiler(data1, options=options)
options2 = dp.ProfilerOptions()
options2.structured_options.correlation.is_enabled = True
profile2 = dp.StructuredProfiler(data2, options=options2)
expected_diff = {
'global_stats': {
'samples_used': -2,
'column_count': 'unchanged',
'row_count': -2,
'row_has_null_ratio': -0.25,
'row_is_null_ratio': -0.25,
'unique_row_ratio': 'unchanged',
'duplicate_row_count': -0.25,
'file_type': 'unchanged',
'encoding': 'unchanged',
'correlation_matrix':
np.array([[1.11022302e-16, 3.13803955e-02],
[3.13803955e-02, 0.00000000e+00]],
dtype=float),
'chi2_matrix':
np.array([[ 0. , -0.04475479],
[-0.04475479, 0. ]],
dtype=float),
'profile_schema':
[{}, {'a': 'unchanged', 'b': 'unchanged'}, {}]},
'data_stats': [
{
'column_name': 'a',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here':
'unchanged',
'all_compiler_stats':
'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': -2,
'null_count': -1,
'null_types': [[], [], ['nan']],
'null_types_index': [{}, {}, {'nan': {2}}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
},
{
'column_name': 'b',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here': 'unchanged',
'all_compiler_stats': 'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': -2,
'null_count': -1,
'null_types': [[], [], ['nan']],
'null_types_index': [{}, {}, {'nan': {2}}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
}
]
}
diff = profile1.diff(profile2)
expected_corr_mat = expected_diff["global_stats"].pop("correlation_matrix")
diff_corr_mat = diff["global_stats"].pop("correlation_matrix")
expected_chi2_mat = expected_diff["global_stats"].pop("chi2_matrix")
diff_chi2_mat = diff["global_stats"].pop("chi2_matrix")
np.testing.assert_array_almost_equal(expected_corr_mat, diff_corr_mat)
np.testing.assert_array_almost_equal(expected_chi2_mat, diff_chi2_mat)
self.assertDictEqual(expected_diff, diff)
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
def test_diff_type_checking(self, *mocks):
data = pd.DataFrame([[1, 2], [5, 6]],
columns=["a", "b"])
profile = dp.StructuredProfiler(data)
with self.assertRaisesRegex(TypeError,
'`StructuredProfiler` and `str` are not of '
'the same profiler type.'):
profile.diff("ERROR")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
def test_diff_with_different_schema(self, *mocks):
data1 = pd.DataFrame([[1, 2], [5, 6]],
columns=["G", "b"])
data2 = pd.DataFrame([[4, 3, 1], [8, 7, 3], [None, None, 1], [9, 1, 10]],
columns=["a", "b", "c"])
# Test via add
profile1 = dp.StructuredProfiler(data1)
profile2 = dp.StructuredProfiler(data2)
expected_diff = {
'global_stats': {
'file_type': 'unchanged',
'encoding': 'unchanged',
'samples_used': -2,
'column_count': -1,
'row_count': -2,
'row_has_null_ratio': -0.25,
'row_is_null_ratio': 'unchanged',
'unique_row_ratio': 'unchanged',
'duplicate_row_count': 'unchanged',
'correlation_matrix': None,
'chi2_matrix': None,
'profile_schema': [{'G': [0]},
{'b': 'unchanged'},
{'a': [0], 'c': [2]}]},
'data_stats': []
}
self.assertDictEqual(expected_diff, profile1.diff(profile2))
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
@mock.patch("sys.stderr", new_callable=StringIO)
def test_logs(self, mock_stderr, *mocks):
options = StructuredOptions()
options.multiprocess.is_enabled = False
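# disable multiprocessing so the tqdm progress output stays in this process
# and can be captured by mock_stderr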
# Capture logs of level INFO and above
with self.assertLogs('DataProfiler.profilers.profile_builder',
level='INFO') as logs:
StructuredProfiler(pd.DataFrame([[0, 1], [2, 3]]), options=options)
# Logs to update user on nulls and statistics
self.assertEqual(['INFO:DataProfiler.profilers.profile_builder:'
'Finding the Null values in the columns... ',
'INFO:DataProfiler.profilers.profile_builder:'
'Calculating the statistics... '],
logs.output)
# Ensure tqdm printed progress bar
self.assertIn('#' * 10, mock_stderr.getvalue())
# Clear stderr
mock_stderr.seek(0)
mock_stderr.truncate(0)
# Now tqdm shouldn't be printed
dp.set_verbosity(logging.WARNING)
StructuredProfiler(pd.DataFrame([[0, 1], [2, 3]]))
# Ensure no progress bar printed
self.assertNotIn('#' * 10, mock_stderr.getvalue())
def test_unique_row_ratio_empty_profiler(self):
profiler = StructuredProfiler(pd.DataFrame([]))
self.assertEqual(0, profiler._get_unique_row_ratio())
class TestStructuredColProfilerClass(unittest.TestCase):
def setUp(self):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(seed=0)
cls.input_file_path = os.path.join(
test_root_path, 'data', 'csv/aws_honeypot_marx_geo.csv'
)
cls.aws_dataset = pd.read_csv(cls.input_file_path)
def test_base_props(self):
src_column = self.aws_dataset.src
src_profile = StructuredColProfiler(
src_column, sample_size=len(src_column))
self.assertIsInstance(src_profile.profiles['data_type_profile'],
ColumnPrimitiveTypeProfileCompiler)
self.assertIsInstance(src_profile.profiles['data_stats_profile'],
ColumnStatsProfileCompiler)
self.assertIsInstance(src_profile.profiles['data_label_profile'],
ColumnDataLabelerCompiler)
data_types = ['int', 'float', 'datetime', 'text']
six.assertCountEqual(
self, data_types,
list(src_profile.profiles['data_type_profile']._profiles.keys())
)
stats_types = ['category', 'order']
six.assertCountEqual(
self, stats_types,
list(src_profile.profiles['data_stats_profile']._profiles.keys())
)
self.assertEqual(3, src_profile.null_count)
self.assertEqual(2999, src_profile.sample_size)
total_nulls = 0
for _, null_rows in src_profile.null_types_index.items():
total_nulls += len(null_rows)
self.assertEqual(3, total_nulls)
# test updated base props with batch addition
src_profile.update_profile(src_column)
src_profile.update_profile(src_column)
self.assertEqual(3*3, src_profile.null_count)
self.assertEqual(2999*3, src_profile.sample_size)
@mock.patch('dataprofiler.profilers.column_profile_compilers.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.column_profile_compilers.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.column_profile_compilers.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_add_profilers(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None])
profile1 = StructuredColProfiler(data[:2])
profile2 = StructuredColProfiler(data[2:])
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`StructuredColProfiler` and `int` are '
'not of the same profiler type.'):
profile1 + 3
# test mismatched names
profile1.name = 'profile1'
profile2.name = 'profile2'
with self.assertRaisesRegex(ValueError,
'Structured profile names are unmatched: '
'profile1 != profile2'):
profile1 + profile2
# test mismatched profiles due to options
profile2.name = 'profile1'
profile1._profiles = dict(test1=mock.Mock())
profile2.profiles.pop('data_label_profile')
with self.assertRaisesRegex(ValueError,
'Structured profilers were not setup with '
'the same options, hence they do not '
'calculate the same profiles and cannot be '
'added together.'):
profile1 + profile2
# test success
profile1.profiles = dict(test=1)
profile2.profiles = dict(test=2)
merged_profile = profile1 + profile2
self.assertEqual(3, merged_profile.profiles['test'])
self.assertCountEqual(['5.0', '4.0', '3.0', '1.0'], merged_profile.sample)
self.assertEqual(6, merged_profile.sample_size)
self.assertEqual(2, merged_profile.null_count)
self.assertListEqual(['nan'], merged_profile.null_types)
self.assertDictEqual({'nan': {1, 5}}, merged_profile.null_types_index)
# test add with different sampling properties
profile1._min_sample_size = 10
profile2._min_sample_size = 100
profile1._sampling_ratio = 0.5
profile2._sampling_ratio = 0.3
profile1._min_true_samples = 11
profile2._min_true_samples = 1
merged_profile = profile1 + profile2
self.assertEqual(100, merged_profile._min_sample_size)
self.assertEqual(0.5, merged_profile._sampling_ratio)
self.assertEqual(11, merged_profile._min_true_samples)
def test_integrated_merge_diff_options(self):
options = dp.ProfilerOptions()
options.set({'data_labeler.is_enabled': False})
data = pd.DataFrame([1, 2, 3, 4])
profile1 = dp.StructuredProfiler(data, options=options)
profile2 = dp.StructuredProfiler(data)
with self.assertRaisesRegex(ValueError,
'Structured profilers were not setup with '
'the same options, hence they do not '
'calculate the same profiles and cannot be '
'added together.'):
profile1 + profile2
def test_clean_data_and_get_base_stats(self, *mocks):
data = pd.Series([1, None, 3, 4, None, 6],
index=['a', 'b', 'c', 'd', 'e', 'f'])
# validate that if sliced data, still functional
# previously `iloc` was used at:
# `df_series = df_series.loc[sorted(true_sample_list)]`
# which caused errors
# Tests with the default null values set
profiler = mock.Mock(spec=StructuredColProfiler)
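# null string patterns mapped to the regex flags used when matching them
# (0 means no flags)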
null_values = {
"": 0,
"nan": re.IGNORECASE,
"none": re.IGNORECASE,
"null": re.IGNORECASE,
" *": 0,
"--*": 0,
"__*": 0,
}
test_utils.set_seed(seed=0)
df_series, base_stats = \
StructuredColProfiler.clean_data_and_get_base_stats(
df_series=data[1:], sample_size=6, null_values=null_values,
min_true_samples=0)
# note data above is a subset `df_series=data[1:]`, 1.0 will not exist
self.assertTrue(np.issubdtype(np.object_, df_series.dtype))
self.assertDictEqual({'sample': ['4.0', '6.0', '3.0'],
'sample_size': 5, 'null_count': 2,
'null_types': dict(nan=['e', 'b']),
'min_id': None, 'max_id': None}, base_stats)
# Tests with some other null values set
null_values = {
"1.0": 0,
"3.0": 0
}
df_series, base_stats = \
StructuredColProfiler.clean_data_and_get_base_stats(
df_series=data, sample_size=6, null_values=null_values,
min_true_samples=0)
self.assertDictEqual({'sample': ["nan", '6.0', '4.0', "nan"],
'sample_size': 6, 'null_count': 2,
'null_types': {'1.0': ['a'], '3.0': ['c']},
'min_id': None, 'max_id': None}, base_stats)
# Tests with no null values set
null_values = {}
df_series, base_stats = \
StructuredColProfiler.clean_data_and_get_base_stats(
df_series=data, sample_size=6, null_values=null_values,
min_true_samples=0)
self.assertDictEqual({'sample': ["3.0", "4.0", '6.0', "nan", "1.0"],
'sample_size': 6, 'null_count': 0,
'null_types': {},
'min_id': None, 'max_id': None}, base_stats)
def test_column_names(self):
data = [['a', 1], ['b', 2], ['c', 3]]
df = pd.DataFrame(data, columns=['letter', 'number'])
profile1 = StructuredColProfiler(df['letter'])
profile2 = StructuredColProfiler(df['number'])
self.assertEqual(profile1.name, 'letter')
self.assertEqual(profile2.name, 'number')
df_series = pd.Series([1, 2, 3, 4, 5])
profile = StructuredColProfiler(df_series)
self.assertEqual(profile.name, df_series.name)
# Ensure issue raised
profile = StructuredColProfiler(df['letter'])
with self.assertRaises(ValueError) as context:
profile.update_profile(df['number'])
self.assertIn(
'Column names have changed, col number does not match prior name letter',
str(context.exception)
)
def test_update_match_are_abstract(self):
six.assertCountEqual(
self,
{'profile', '_update_helper', 'update'},
dp.profilers.BaseColumnProfiler.__abstractmethods__
)
def test_data_labeler_toggle(self):
src_column = self.aws_dataset.src
structured_options = StructuredOptions()
structured_options.data_labeler.is_enabled = False
std_profile = StructuredColProfiler(src_column,
sample_size=len(src_column))
togg_profile = StructuredColProfiler(src_column,
sample_size=len(src_column),
options=structured_options)
self.assertIn('data_label_profile', std_profile.profiles)
self.assertNotIn('data_label_profile', togg_profile.profiles)
def test_null_count(self):
column = pd.Series([1, float('nan')] * 10)
# test null_count when full sample size
random.seed(0)
profile = StructuredColProfiler(column, sample_size=len(column))
self.assertEqual(10, profile.null_count)
def test_generating_report_ensure_no_error(self):
file_path = os.path.join(test_root_path, 'data', 'csv/diamonds.csv')
data = pd.read_csv(file_path)
profile = dp.StructuredProfiler(data[:1000])
readable_report = profile.report(
report_options={"output_format": "compact"})
def test_get_sample_size(self):
data = pd.DataFrame([0] * int(50e3))
# test data size < min_sample_size = 5000 by default
profiler = dp.StructuredProfiler(pd.DataFrame([]))
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
sample_size = profiler._get_sample_size(data[:1000])
self.assertEqual(1000, sample_size)
# test data size * 0.20 < min_sample_size < data size
sample_size = profiler._get_sample_size(data[:10000])
self.assertEqual(5000, sample_size)
# test data size * 0.20 > min_sample_size
sample_size = profiler._get_sample_size(data)
self.assertEqual(10000, sample_size)
# test with sampling_ratio 0.5: data size * 0.50 > min_sample_size
profiler._sampling_ratio = 0.5
sample_size = profiler._get_sample_size(data)
self.assertEqual(25000, sample_size)
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_profile_from_chunk')
def test_sample_size_passed_to_profile(self, *mocks):
update_mock = mocks[0]
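# _update_profile_from_chunk is mocked; call_args[0][1] below captures the
# sample size passed to it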
# data setup
data = pd.DataFrame([0] * int(50e3))
# option setup
profiler_options = ProfilerOptions()
profiler_options.structured_options.multiprocess.is_enabled = False
profiler_options.set({'data_labeler.is_enabled': False})
# test data size < min_sample_size = 5000 by default
profiler = dp.StructuredProfiler(data[:1000], options=profiler_options)
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
self.assertEqual(1000, update_mock.call_args[0][1])
# test data size * 0.20 < min_sample_size < data size
profiler = dp.StructuredProfiler(data[:10000], options=profiler_options)
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
self.assertEqual(5000, update_mock.call_args[0][1])
# test data size * 0.20 > min_sample_size
profiler = dp.StructuredProfiler(data, options=profiler_options)
profiler._min_sample_size = 5000
profiler._sampling_ratio = 0.2
self.assertEqual(10000, update_mock.call_args[0][1])
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_index_overlap_for_update_profile(self, *mocks):
data = pd.Series([0, None, 1, 2, None])
profile = StructuredColProfiler(data)
self.assertEqual(0, profile._min_id)
self.assertEqual(4, profile._max_id)
self.assertDictEqual(profile.null_types_index, {'nan': {1, 4}})
profile.update_profile(data)
# Now all indices will be shifted by max_id + 1 (5)
# So the 2 None will move from indices 1, 4 to 6, 9
self.assertEqual(0, profile._min_id)
self.assertEqual(9, profile._max_id)
self.assertDictEqual(profile.null_types_index, {'nan': {1, 4, 6, 9}})
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_index_overlap_for_merge(self, *mocks):
data = pd.Series([0, None, 1, 2, None])
profile1 = StructuredColProfiler(data)
profile2 = StructuredColProfiler(data)
# Ensure merged profile included shifted indices
profile3 = profile1 + profile2
self.assertEqual(0, profile3._min_id)
self.assertEqual(9, profile3._max_id)
self.assertDictEqual(profile3.null_types_index, {'nan': {1, 4, 6, 9}})
# Ensure original profiles not overwritten
self.assertEqual(0, profile1._min_id)
self.assertEqual(4, profile1._max_id)
self.assertDictEqual(profile1.null_types_index, {'nan': {1, 4}})
self.assertEqual(0, profile2._min_id)
self.assertEqual(4, profile2._max_id)
self.assertDictEqual(profile2.null_types_index, {'nan': {1, 4}})
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_min_max_id_properly_update(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None, 1])
profile1 = StructuredColProfiler(data[:2])
profile2 = StructuredColProfiler(data[2:])
# Base initialization
self.assertEqual(0, profile1._min_id)
self.assertEqual(1, profile1._max_id)
self.assertEqual(2, profile2._min_id)
self.assertEqual(6, profile2._max_id)
# Needs to work with merge
profile3 = profile1 + profile2
self.assertEqual(0, profile3._min_id)
self.assertEqual(6, profile3._max_id)
# Needs to work with update_profile
profile = StructuredColProfiler(data[:2])
profile.update_profile(data[2:])
self.assertEqual(0, profile._min_id)
self.assertEqual(6, profile._max_id)
@mock.patch('dataprofiler.profilers.data_labeler_column_profile.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
def test_diff(self, *mocks):
# Data labeler compiler diff
mocks[0].return_value = {
'statistics': {
'avg_predictions': {
'a': 'unchanged'
},
'label_representation': {
'a': 'unchanged'
}
},
'data_label': [[], ['a'], []]
}
# stats compiler diff
mocks[1].return_value = {
'order': ['ascending', 'descending'],
'categorical': 'unchanged',
'statistics': {
'all_compiler_stats': 'unchanged'
}
}
# primitive stats compiler diff
mocks[2].return_value = {
'data_type_representation': {
'all_data_types': 'unchanged'
},
'data_type': 'unchanged',
'statistics': {
'numerical_statistics_here': "unchanged"
}
}
data = pd.Series([1, None, 3, 4, 5, None, 1])
data2 = pd.Series(["hello", "goodby", 125, 0])
data.name = "TEST"
data2.name = "TEST"
profile1 = StructuredColProfiler(data)
profile2 = StructuredColProfiler(data2)
expected_diff = {
'column_name': 'TEST',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here': 'unchanged',
'all_compiler_stats': 'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': 3,
'null_count': 2,
'null_types': [['nan'], [], []],
'null_types_index': [{'nan': {1, 5}}, {}, {}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
}
self.assertDictEqual(expected_diff, dict(profile1.diff(profile2)))
@mock.patch('dataprofiler.profilers.profile_builder.UnstructuredCompiler',
spec=UnstructuredCompiler)
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=UnstructuredDataLabeler)
class TestUnstructuredProfiler(unittest.TestCase):
@classmethod
def setUp(cls):
test_utils.set_seed(seed=0)
def test_base(self, *mocks):
# ensure can make an empty profiler
profiler = UnstructuredProfiler(None)
self.assertIsNone(profiler.encoding)
self.assertIsNone(profiler.file_type)
self.assertIsNone(profiler._profile)
self.assertIsNone(profiler._samples_per_update)
self.assertEqual(0, profiler._min_true_samples)
self.assertEqual(0, profiler.total_samples)
self.assertEqual(0, profiler._empty_line_count)
self.assertEqual(0, profiler.memory_size)
self.assertEqual(0.2, profiler._sampling_ratio)
self.assertEqual(5000, profiler._min_sample_size)
self.assertEqual([], profiler.sample)
self.assertIsInstance(profiler.options, UnstructuredOptions)
self.assertDictEqual({}, profiler.times)
# can set samples_per_update and min_true_samples
profiler = UnstructuredProfiler(None, samples_per_update=10,
min_true_samples=5)
self.assertEqual(profiler._samples_per_update, 10)
self.assertEqual(profiler._min_true_samples, 5)
# can properties update correctly for data
data = | pd.Series(['this', 'is my', '\n\r', 'test']) | pandas.Series |
import pandas as pd
import numpy as np
from datetime import datetime
def consolidate():
#################################################################################
# Read in Data
bene_train = pd.read_csv('./data/Train_Beneficiary.csv')
inpat_train = pd.read_csv('./data/Train_Inpatient.csv')
outpat_train = pd.read_csv('./data/Train_Outpatient.csv')
target_train = | pd.read_csv('./data/Train.csv') | pandas.read_csv |
"""
the battery.py module contains the Battery class, which sets up the test battery and runs the different components,
and the Pipe class, which handles the tracking and support for MP runs.
It might make sense to put the folder dict here and pass it on to make things more consistent;
currently it is in basetest and savingpipe, which is a potential cause for errors if the two are not the same.
The Battery class has all the methods for running the entire battery of tests
and can be run single-threaded or with MP.
MP will produce compatibility issues, but it is a lot faster.
The battery now includes the PySD helper.
The battery mainly does:
- set up of the test list
- writing of the pipeline files (for MP) or handling of the pipeline (for non-MP)
- determining the MP settings
Version 0.3
Update 30.07.18/sk
"""
import os
import pandas as pd
import shutil
import datetime
import pickle
import subprocess
from configparser import ConfigParser
from tb import descriptives as desc
from tb.tests import Sensitivity, MonteCarlo, Equilibrium, TimeStep, Switches, Distance, KnockOut, Extreme, Horizon
from tb.tb_backend.builder import Batch, ExecFile
from tb.tb_backend.pysdhelper import PysdHelper
from tb.tb_backend.report import Report
class Battery:
"""
Battery handles the setting up and execution of the tests
"""
def __init__(self, folder, mp_setting=True, first=None, last=None, distance=False, knockout=False):
# total elapsed is currently broken, need to be added again
self.total_elapsed = 0
# test list is initialized for adding the tests later on
self.test_lst = []
self.file_lst = []
# these are the settings that come from the testingbattery master file
self.folder = folder
self.first_file = first
self.last_file = last
self.mp_setting = mp_setting
# distance is not in the config file because it's currently useless
self.distance = distance
# knockout is not in the config file because it's currently wrong 18.06.18/sk
self.knockout = knockout
# defining the report and debug folder because folder dicts are limited to tests (defined in base test)
# could also be passed from here, but well...
self.report_folder = os.path.join(os.path.split(self.folder)[0], 'report', os.path.split(self.folder)[1])
self.debug_folder = os.path.join(self.report_folder, '_debug', '_runtime')
# config file is defined and read in here
self.cf = ConfigParser()
# config folder doesn't change, so it's put here to it's on one line
self.cf.read(os.path.join(os.path.split(folder)[0], '_config', 'tb_config.ini'))
# reading in the values from the config file for test parameters
self.sensitivity_percentage = self.cf['test parameters'].getfloat('sensitivity_percentage', fallback=0.1)
self.montecarlo_percentage = self.cf['test parameters'].getfloat('montecarlo_percentage', fallback=0.5)
self.montecarlo_runs = self.cf['test parameters'].getint('montecarlo_runs', fallback=100)
self.equilibrium_method = self.cf['test parameters'].getint('equilibrium_method', fallback=1)
self.equilibrium_increment = self.cf['test parameters'].getfloat('equilibrium_increment', fallback=0.2)
self.equilibrium_incset = self.cf['test parameters'].getboolean('equilibrium_incset', fallback=True)
self.extreme_max = self.cf['test parameters'].getint('extreme_max', fallback=10)
# reading in the setting for the pysd helper from the config file
self.pysdhelper_setting = self.cf['component control'].getboolean('PySD_helper', fallback=True)
# getting the translation setting for the initialization
self.set_trans = self.cf['component control'].getboolean('translation', fallback=True)
        # getting the mode as well; it is needed for the clean-up of some .csv files
self.testing_mode = self.cf['testing'].getboolean('testing_mode')
# define the MP settings, if MP is true, then it is overwritten
self.cores = 'no MP'
self.processes = 'no MP'
self.max_tasks = 'no MP'
# initialization methods #
def clear_reports(self):
"""
clears out the reports (html files) from the source folder
:return:
"""
rep_lst = [f for f in os.listdir(self.folder) if f.endswith('.html')]
for file in rep_lst:
os.remove(os.path.join(self.folder, file))
def run_report(self):
"""
Run report is used when the PySD helper is on to extract statistics from the original file,
prior to the pysd helper adjustments
the method is in descriptives.py and should be rewritten at some point
:return:
"""
desc.init(self.folder)
trans_elapsed = desc.create_report(self.first_file, self.last_file)
self.total_elapsed += trans_elapsed
def run_pysdhelper(self):
"""
run pysd helper is the pysd helper integration and runs it on all models that are in the testing folder
previously treated models are not rerun again
:return:
"""
file_lst = [f for f in os.listdir(self.folder) if f.endswith('.mdl')]
        # this could potentially lead to a model whose original file name ends with 'treated'
        # not being treated; if that happens, just rename the file and it works
file_lst = [f for f in file_lst if not f.endswith('_treated.mdl')]
for file in file_lst:
model = PysdHelper(self.folder, file)
model.run_helper()
def clear_error_files(self):
"""
error files are only removed when a new model is translated, presumably after new model changes have been done
this allows running tests independently on the same translated version while keeping the errors
:return:
"""
file_lst = [f for f in os.listdir(self.folder) if f.endswith('.mdl')]
for file in file_lst:
try:
os.remove(os.path.join(self.folder, file.rsplit('.', 1)[0], 'error_file.csv'))
except FileNotFoundError:
pass
def run_translate(self):
"""
run translate calls the full translate method from the descriptives.py and is run after the pysd helper
:return:
"""
if not self.pysdhelper_setting:
desc.init(self.folder)
trans_elapsed = desc.full_translate(self.first_file, self.last_file)
# currently the time elapsed is broken
self.total_elapsed += trans_elapsed
def load_files(self):
"""
load files gets the file list after the translation for the tests
needs to be expanded to xmile types when xmile translation is done
:return: list of files
"""
# first we load the .mdl files then rename it to py because if there are macros,
# pysd will create .py files in the same folder which creates problems in the testing of the models
self.file_lst = [f for f in os.listdir(self.folder) if f.endswith('.mdl')]
self.file_lst = [f.replace('.mdl', '.py') for f in self.file_lst]
def clear_base(self):
"""
Removes the reporting and result files in the base folder (each models individual results folder)
:return:
"""
for file in self.file_lst:
try:
olfiles = [f for f in os.listdir(os.path.join(self.folder, file.rsplit('.', 1)[0])) if
f.endswith('.csv')]
except FileNotFoundError:
olfiles = []
if olfiles:
for olfile in olfiles:
# error file needs to be kept because it might have errors from translation in it
if not olfile == 'error_file.csv':
os.remove(os.path.join(self.folder, file.rsplit('.', 1)[0], olfile))
def init_reports(self):
"""
        initializes the report page
        since the name of the model changes if the PySD helper is active, the information is saved in different folders;
        to make sure it all ends up with the same model, the statistics, the PySD helper actions and the working model
        are added to the report based on pickled report tuples
:return:
"""
# grabbing the pickle file list from the report folder
pfile_lst = [f for f in os.listdir(self.report_folder) if f.endswith('.pickle')]
for file in self.file_lst:
print('Creating report for', file)
report = Report(self.folder, file)
# setting the styles to make sure the tables all look the same
report.set_styles()
report.write_quicklinks()
# pickle files that are for this model are selected here
rep_lst = [f for f in pfile_lst if f.startswith(file.replace('_treated', '').rsplit('.', 1)[0])]
for rep in rep_lst:
# if there are 3 pickle files, that means that the pysd helper has been run and thus it makes sense to
# report the original model, if not, it doesn't make sense because it's the same as the working model
if len(rep_lst) == 3:
if rep.endswith('orig.pickle'):
pickle_in = open(os.path.join(self.report_folder, rep), 'rb')
rep_tpl = pickle.load(pickle_in)
report.write_trans(rep_tpl)
if rep.endswith('helper.pickle'):
pickle_in = open(os.path.join(self.report_folder, rep), 'rb')
rep_tpl = pickle.load(pickle_in)
report.write_helper(rep_tpl)
# an argument could be made that the working model doesn't need to be reported when the original model
# is reported, since the changes likely are marginal in terms of numbers, but it's not that much to do,
# so we'll just leave it
if rep.endswith('work.pickle'):
pickle_in = open(os.path.join(self.report_folder, rep), 'rb')
rep_tpl = pickle.load(pickle_in)
report.write_trans(rep_tpl)
report.save_report()
def set_mp_settings(self):
"""
determines the number of CPUs used and creates the settings for the test execution
is only run if the mp setting is on
values are set to 'No MP' unless this method is run
:return:
"""
# getting the MP settings regardless of MP setting
cpu_cnt = os.cpu_count()
        # 0.69 is chosen to keep some CPU power free for regular operation while still using as much power as
        # possible for the calculations
mp_cpu = round(cpu_cnt * 0.69)
self.cores = mp_cpu
# processes could probably be higher, but for now lets keep it at cpu number
self.processes = mp_cpu
# max tasks defines the limit until a child is relaunched
# value is set pretty randomly, if RAM issues are reported, this needs to be lowered
self.max_tasks = 1000
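    # Worked example (illustrative): on an 8-core machine, cpu_cnt = 8, so
    # mp_cpu = round(8 * 0.69) = 6 -> 6 worker processes, each recycled after
    # max_tasks = 1000 tasks to keep memory usage in check.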
def prep_test_lst(self):
"""
component control, true means test is run
setting comes from config file, defaults to True
:return:
"""
equilibrium = self.cf['component control'].getboolean('equilibrium', fallback=True)
sensitivity = self.cf['component control'].getboolean('sensitivity', fallback=True)
switches = self.cf['component control'].getboolean('switches', fallback=True)
timestep = self.cf['component control'].getboolean('timestep', fallback=True)
montecarlo = self.cf['component control'].getboolean('montecarlo', fallback=True)
extreme = self.cf['component control'].getboolean('extreme', fallback=True)
horizon = self.cf['component control'].getboolean('horizon', fallback=True)
for mdl_file in self.file_lst[self.first_file:self.last_file]:
err_code = 'Translation Error'
try:
# this has to be in the right order for the testing sequence
# if run single thread, the list is going to determine order
# if run with MP, then the tests are run in alphabetical order, thus need to have the test ID
# 02 - 09 are kept open for other test that might be linked to later tests
# 10 - 19 are sensitivity tests
# 20+ are other tests
# 99 is used by the dummy test generator
if equilibrium:
err_code = 'Translation Error equi'
test = Equilibrium(self.folder, mdl_file, self.equilibrium_method, self.equilibrium_increment,
self.equilibrium_incset)
test.testID = '00'
self.test_lst.append(test)
# distance is currently useless, but might be useful for other tests at some point
# distance is currently just handled in testing mode and the setting comes from the testing battery
# contrary to all other tests which get the settings from the config file
if self.distance:
err_code = 'Translation Error dist'
test = Distance(self.folder, mdl_file)
test.testID = '01'
self.test_lst.append(test)
if montecarlo:
err_code = 'Translation Error mc'
test = MonteCarlo(self.folder, mdl_file, self.montecarlo_percentage, self.montecarlo_runs)
test.testID = '10'
self.test_lst.append(test)
if sensitivity:
err_code = 'Translation Error sens'
test = Sensitivity(self.folder, mdl_file, self.sensitivity_percentage)
test.testID = '11'
self.test_lst.append(test)
if extreme:
err_code = 'Translation Error ext'
test = Extreme(self.folder, mdl_file, self.extreme_max)
test.testID = '20'
self.test_lst.append(test)
if timestep:
err_code = 'Translation Error ts'
test = TimeStep(self.folder, mdl_file)
test.testID = '21'
self.test_lst.append(test)
if self.knockout:
err_code = 'Translation Error ko'
test = KnockOut(self.folder, mdl_file)
test.testID = '22'
self.test_lst.append(test)
if horizon:
err_code = 'Translation Error hori'
test = Horizon(self.folder, mdl_file)
test.testID = '23'
self.test_lst.append(test)
if switches:
err_code = 'Translation Error swit'
test = Switches(self.folder, mdl_file)
test.testID = '24'
self.test_lst.append(test)
except Exception as e:
# there should be no errors here but better safe than sorry
f = open(os.path.join(self.folder, 'exec_error_file.txt'), 'a')
f.write('%s, %s : %s\n' % (err_code, str(mdl_file), str(e)))
f.close()
def create_batch_file(self):
"""
creates the batch file for mp file execution
batch file contents are in the builder
:return:
"""
batch = Batch(self.folder)
batch.write_batch()
# run pipe methods #
@staticmethod
def clean_files(folder, ftype):
"""
cleans all files in a folder defined by file extension
:param folder: folder to be cleaned
:param ftype: file type (file extension) to be deleted
:return:
"""
file_lst = [f for f in os.listdir(folder) if f.endswith(ftype)]
for f in file_lst:
os.remove(os.path.join(folder, f))
def report_errors(self):
"""
report files are handled after all tests are executed and added to the report
:return:
"""
for mdl_file in self.file_lst[self.first_file:self.last_file]:
rep = Report(self.folder, mdl_file)
error_link = os.path.join(self.folder, mdl_file.rsplit('.', 1)[0], 'error_file.csv')
try:
error_df = pd.read_csv(error_link, index_col=0)
error_df = error_df.loc[error_df['Error Type'] != 'No Error']
cnts = error_df['Source'].value_counts()
# error link needs relative path to make sure results can be copied to other locations
error_link = error_link.replace(self.folder, '')
except FileNotFoundError:
                # if the reading of the error file fails, we pass an empty pandas series to the report tuple
cnts = | pd.Series() | pandas.Series |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.special
import scipy.optimize
import scipy.io
import glob
# Import the project utils
import sys
sys.path.insert(0, '../')
import NB_sortseq_utils as utils
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Seaborn, useful for graphics
import seaborn as sns
sns.set_palette("deep", color_codes=True)
utils.set_plotting_style1()
#===============================================================================
# Set output directory based on the graphicspath.tex file to print in dropbox
#===============================================================================
output = 'output_figs/'
#===============================================================================
# Read the data
#===============================================================================
datadir = '../mass_spec/*/'
files = glob.glob(datadir+'*.csv')
df = | pd.DataFrame() | pandas.DataFrame |
import unittest
import pathlib
import os
import pandas as pd
from enda.contracts import Contracts
from enda.timeseries import TimeSeries
class TestContracts(unittest.TestCase):
EXAMPLE_A_DIR = os.path.join(pathlib.Path(__file__).parent.absolute(), "example_a")
CONTRACTS_PATH = os.path.join(EXAMPLE_A_DIR, "contracts.csv")
def test_read_contracts_from_file(self):
contracts = Contracts.read_contracts_from_file(TestContracts.CONTRACTS_PATH)
self.assertEqual((7, 12), contracts.shape)
def test_check_contracts_dates(self):
contracts = Contracts.read_contracts_from_file(
TestContracts.CONTRACTS_PATH,
date_start_col="date_start",
date_end_exclusive_col="date_end_exclusive",
date_format="%Y-%m-%d"
)
# check that it fails if the given date_start_col is not there
with self.assertRaises(ValueError):
Contracts.check_contracts_dates(
contracts,
date_start_col="dummy",
date_end_exclusive_col="date_end_exclusive"
)
# check that it fails if one contract ends before it starts
c = contracts.copy(deep=True)
# set a wrong date_end_exclusive for the first contract
c.loc[0, "date_end_exclusive"] = pd.to_datetime("2020-09-16")
with self.assertRaises(ValueError):
Contracts.check_contracts_dates(
c,
date_start_col="dummy",
date_end_exclusive_col="date_end_exclusive"
)
@staticmethod
def get_simple_portfolio_by_day():
contracts = Contracts.read_contracts_from_file(TestContracts.CONTRACTS_PATH)
contracts["contracts_count"] = 1 # add a variable to count the number of contracts for each row
# count the running total, each day, of some columns
portfolio_by_day = Contracts.compute_portfolio_by_day(
contracts,
columns_to_sum=["contracts_count", "subscribed_power_kva", "estimated_annual_consumption_kwh"],
date_start_col="date_start",
date_end_exclusive_col="date_end_exclusive"
)
return portfolio_by_day
def test_compute_portfolio_by_day_1(self):
"""" test with a single group """
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
# print(portfolio_by_day)
self.assertEqual((11, 3), portfolio_by_day.shape)
self.assertEqual(4, portfolio_by_day.loc["2020-09-26", "contracts_count"])
self.assertEqual(30, portfolio_by_day.loc["2020-09-26", "subscribed_power_kva"])
self.assertEqual(5, portfolio_by_day["contracts_count"].max())
self.assertEqual(48, portfolio_by_day["subscribed_power_kva"].max())
def test_compute_portfolio_by_day_2(self):
"""" test with 2 groups , and a single measure to sum"""
contracts = Contracts.read_contracts_from_file(TestContracts.CONTRACTS_PATH)
contracts_sm = contracts[contracts["smart_metered"]]
pf_sm = Contracts.compute_portfolio_by_day(contracts_sm, columns_to_sum=["subscribed_power_kva"])
contracts_slp = contracts[~contracts["smart_metered"]]
pf_slp = Contracts.compute_portfolio_by_day(contracts_slp, columns_to_sum=["subscribed_power_kva"])
# print(pf_sm, pf_slp)
self.assertEqual(pf_sm.shape, (5, 1))
self.assertEqual(pf_slp.shape, (11, 1))
self.assertEqual(18, pf_sm.loc["2020-09-20", "subscribed_power_kva"])
self.assertEqual(27, pf_slp.loc["2020-09-26", "subscribed_power_kva"])
def test_get_portfolio_between_dates_1(self):
""" test with a portfolio by day """
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
self.assertEqual(pd.to_datetime("2020-09-16"), portfolio_by_day.index.min())
self.assertEqual(pd.to_datetime("2020-09-26"), portfolio_by_day.index.max())
pf = Contracts.get_portfolio_between_dates(
portfolio_by_day,
start_datetime=pd.to_datetime("2020-09-10"),
end_datetime_exclusive= | pd.to_datetime("2020-09-30") | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.odr
import itertools
def computeModelDetails(frame):
""" Takes a dataframe and computes columns related to the dynamical frb model """
tauwerror_expr = lambda r: 1e3*r['time_res']*np.sqrt(r['max_sigma']**6*r['min_sigma_error']**2*np.cos(r['angle']-np.pi/2)**4 + r['angle_error']**2*r['max_sigma']**2*r['min_sigma']**2*(-r['max_sigma']**2 + r['min_sigma']**2)**2*np.cos(r['angle']-np.pi/2)**2*np.sin(r['angle']-np.pi/2)**2 + r['max_sigma_error']**2*r['min_sigma']**6*np.sin(r['angle']-np.pi/2)**4)/(r['max_sigma']**2*np.cos(r['angle']-np.pi/2)**2 + r['min_sigma']**2*np.sin(r['angle']-np.pi/2)**2)**1.5
frame['drift_abs'] = -1*(frame['drift (mhz/ms)'])
frame['drift_over_nuobs'] = frame[['drift_abs','center_f']].apply(lambda row: row['drift_abs'] / row['center_f'], axis=1)
frame['recip_drift_over_nuobs'] = 1/frame['drift_over_nuobs']
frame['drift_abs_nuobssq'] = frame['drift_abs']/frame['center_f']**2/1000 # unitless
frame['min_sigma'] = frame[['sigmax','sigmay']].apply(lambda row: min(abs(row['sigmax']), abs(row['sigmay'])), axis=1)
frame['max_sigma'] = frame[['sigmax','sigmay']].apply(lambda row: max(abs(row['sigmax']), abs(row['sigmay'])), axis=1)
# the following two lines assume that if sigmax > sigmay, then sigmax_error > sigmay_error, which is true (so far) for this dataset
frame['min_sigma_error'] = frame[['sigmax_error','sigmay_error']].apply(lambda row: min(row['sigmax_error'], row['sigmay_error']), axis=1)
frame['max_sigma_error'] = frame[['sigmax_error','sigmay_error']].apply(lambda row: max(row['sigmax_error'], row['sigmay_error']), axis=1)
frame['sigma_t'] = frame[['min_sigma','time_res']].apply(lambda row: row['min_sigma']*row['time_res'], axis=1)
frame['tau_w'] = frame[['time_res', 'min_sigma', 'max_sigma', 'angle']].apply(
lambda r: r['time_res']*r['min_sigma']*r['max_sigma'] / np.sqrt( np.abs((np.sin(r['angle']-np.pi/2)*r['min_sigma'])**2 + (np.cos(r['angle']-np.pi/2)*r['max_sigma'])**2 )),
axis=1
)
# this error is in ms
frame['tau_w_error'] = frame[['tau_w', 'time_res', 'min_sigma', 'max_sigma', 'min_sigma_error', 'max_sigma_error', 'angle', 'angle_error']].apply(
tauwerror_expr,
axis=1
)
frame['sigma_t_ms'] = frame['sigma_t']*1e3
frame['tau_w_ms'] = frame['tau_w']*1e3
## Redshift corrections
if 'z' in frame.index:
frame['drift_z'] = frame[['drift_over_nuobs', 'z']].apply(lambda row: row['drift_over_nuobs']*(1+row['z']), axis=1)
frame['tau_w_ms_z'] = frame[['tau_w_ms', 'z']].apply(lambda row: row['tau_w_ms']/(1+row['z']), axis=1)
return frame
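# For reference, the tau_w expression applied above can be written out as (derived directly
# from the lambda, not an independent result):
#   tau_w = dt * sigma_min * sigma_max
#           / sqrt( (sigma_min * sin(theta - pi/2))**2 + (sigma_max * cos(theta - pi/2))**2 )
# i.e. roughly the duration of the sub-burst measured along the time axis of the rotated
# 2D Gaussian, scaled by the time resolution dt; tau_w_error propagates the sigma and angle
# uncertainties through the same expression (reported in ms via the 1e3 factor).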
def cleanAngle(row):
angle = row['angle']
if angle < 0 or angle > np.pi:
if angle > np.pi:
return angle % (np.pi)
elif angle < 0 and angle > -np.pi:
return angle + np.pi
elif angle < 0 and angle < -np.pi:
angle = angle % (2*np.pi)
if angle > np.pi:
return angle - np.pi
else:
return angle
else:
return angle
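# Quick sanity check of the wrapping above (values rounded): an angle of -0.3 rad is mapped to
# -0.3 + pi ~= 2.84, and 3.5 rad is mapped to 3.5 % pi ~= 0.36, so results land in [0, pi].
# Typical use (assumed): frame['angle'] = frame.apply(cleanAngle, axis=1)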
def atanmodel(B, x):
return np.arctan(x/B[0])
def offset_atanmodel(B, x, zero_ddm_fit=6.554):
return np.arctan(x/zero_ddm_fit) + B[0]
def reciprocal(x, a):
return a/x
def reciprocal_log(x, b):
return -x+b
def log_log(x, k, b):
return k*x+b
def reciprocal_odr(B, x):
return B[0]/x
def reciprocal_odr_log(B, x):
return -x+B[0]
def fitreciprocal(x, data, sigma=1):
guess = [522]
abs_sigma = True
if (type(sigma) == int) and (sigma == 1):
abs_sigma = False
sigma = np.zeros(len(data.ravel())) + sigma
popt, pcov = scipy.optimize.curve_fit(reciprocal, x, data, p0=guess, sigma=sigma, absolute_sigma=abs_sigma)
return popt, pcov
def fitreciprocal_log(x, data, sigma=1, loglog=False):
guess = [522]
abs_sigma = True
if (type(sigma) == int) and (sigma == 1):
abs_sigma = False
sigma = np.zeros(len(data.ravel())) + sigma
if loglog:
guess = [1,1]
popt, pcov = scipy.optimize.curve_fit(log_log, x, data, p0=guess, sigma=sigma, absolute_sigma=abs_sigma)
else:
popt, pcov = scipy.optimize.curve_fit(reciprocal_log, x, data, p0=guess, sigma=sigma, absolute_sigma=abs_sigma)
return popt, pcov
def modelerror(frame):
ex = np.sqrt(frame['red_chisq'])*frame['tau_w_error']
ey = np.sqrt(frame['red_chisq'])*frame['drift error (mhz/ms)']/frame['center_f']
return ex, ey
def rangeerror(frame):
"""
	These ranges are not errors in the statistical sense. They are the min/max values, which should
be larger than the real errors. So this is extremely conservative while also being easier
to compute.
The strange shape of the returned value is due to a quirk in the way pandas handles asymmetric
errors.
"""
ex = [np.array([frame['tau_w_ms'] - frame['tw_min'], frame['tw_max'] - frame['tau_w_ms']])]
ey = [np.array([frame['drift_over_nuobs'] - frame['drift_nu_min'], frame['drift_nu_max'] - frame['drift_over_nuobs']])]
return ex, ey
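# Usage sketch (hypothetical values; this mirrors how plotDriftVsDuration consumes the output):
#   ex, ey = rangeerror(frame)
#   frame.plot.scatter(x='tau_w_ms', y='drift_over_nuobs', xerr=ex, yerr=ey)
# pandas accepts asymmetric error bars as an Mx2xN structure (one 2xN lower/upper array per
# plotted column), which is why the 2xN arrays above are wrapped in an extra list.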
def log_error(frame):
""" see modelerror() """
sx = np.log((frame['tau_w_ms'] + np.sqrt(frame['red_chisq'])*frame['tau_w_error']) / frame['tau_w_ms'])
sy = np.log((frame['drift_over_nuobs'] + np.sqrt(frame['red_chisq'])*(frame['drift error (mhz/ms)'])) / frame['drift_over_nuobs'])
return sx, sy
def rangelog_error(frame):
""" The range errors are asymmetric. Average the error """
ex, ey = rangeerror(frame)
ex = np.log((frame['tau_w_ms'] + (ex[0][0]+ex[0][1])/2 ) / frame['tau_w_ms'])
ey = np.log((frame['drift_over_nuobs'] + (ey[0][0]+ey[0][1])/2) / frame['drift_over_nuobs'])
return ey, ey
# return np.log(np.maximum(ex[0][0], ex[0][1])), np.log(np.maximum(ey[0][0], ey[0][1]))
def rangeerror_odr(frame):
""" The range errors are asymmetric. Take the largest error """
ex, ey = rangeerror(frame)
return np.maximum(ex[0][0], ex[0][1]), np.maximum(ey[0][0], ey[0][1])
def fitodr(frame, beta0=[1000], errorfunc=log_error, log=True):
fit_model = scipy.odr.Model(reciprocal_odr)
fit_model_log = scipy.odr.Model(reciprocal_odr_log)
fitdata = scipy.odr.RealData(frame['tau_w_ms'],
frame['drift_over_nuobs'],
sx=rangeerror_odr(frame)[0],
sy=rangeerror_odr(frame)[1])
fitdata_log = scipy.odr.RealData(np.log(frame['tau_w_ms']),
np.log(frame['drift_over_nuobs']),
sx=errorfunc(frame)[0],
sy=errorfunc(frame)[1])
odrfitter_log = scipy.odr.ODR(fitdata_log, fit_model_log, beta0=beta0)
odrfitter_log.set_job(fit_type=0)
odrfitter = scipy.odr.ODR(fitdata, fit_model, beta0=beta0)
odrfitter.set_job(fit_type=0)
if log:
# print('log odr')
return odrfitter_log.run()
else:
# print('linear odr')
return odrfitter.run()
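# Usage sketch (assumed): scipy.odr's ODR.run() returns an Output object, so the fitted
# constant of the reciprocal (or log-space) model and its uncertainty can be read off as:
#   fit = fitodr(frame, errorfunc=log_error, log=True)
#   param, param_err = fit.beta[0], fit.sd_beta[0]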
def driftranges(source):
"""
Given all burst and model data at different trial DMs,
	computes the range of drifts and durations across the range of trial DMs
"""
yaxis = 'drift_over_nuobs'
xaxis ='tau_w_ms'
for burst in source.index.unique():
burstdf = source.loc[burst]
eduration = np.sqrt(burstdf['red_chisq'])*burstdf['tau_w_error']
edriftnuobs = np.sqrt(burstdf['red_chisq'])*burstdf['drift error (mhz/ms)']/burstdf['center_f']
dmax, dmin = np.max(burstdf[yaxis] + edriftnuobs), np.min(burstdf[yaxis] - edriftnuobs)
tmax, tmin = np.max(burstdf[xaxis] + eduration) , np.min(burstdf[xaxis] - eduration)
source.loc[burst, 'drift_nu_max'] = dmax
source.loc[burst, 'drift_nu_min'] = dmin
source.loc[burst, 'drift_max'] = dmax*burstdf['center_f']
source.loc[burst, 'drift_min'] = dmin*burstdf['center_f']
source.loc[burst, 'tw_max'] = tmax
source.loc[burst, 'tw_min'] = tmin
# print(f'burst: {burst},\t\tdriftrange = ({dmin}, {dmax}),\t\ttwrange = ({tmin}, {tmax})')
return source
def plotDriftVsDuration(frames=[], labels=[], title=None, logscale=True, annotatei=0,
markers=['o', 'p', 'X', 'd', 's'], hidefit=[], hidefitlabel=False,
fitlines=['r-', 'b--', 'g-.'], fitextents=None,
errorfunc=modelerror, fiterrorfunc=rangelog_error, dmtrace=False):
""" wip """
plt.rcParams["errorbar.capsize"] = 4
plt.rcParams["font.family"] = "serif"
markersize = 125#100
fontsize = 25 #18
annotsize = 14
filename = 'log_drift_over_nu_obsvsduration' if logscale else 'drift_over_nu_obsvsduration'
figsize = (17, 8)
figsize = (17, 9)
# figsize = (14, 10)
yaxis = 'drift_over_nuobs'
yaxis_lbl = 'Sub-burst Slope $\\,\\left|\\frac{d\\nu_\\mathrm{obs}}{dt_\\mathrm{D}}\\right|(1/\\nu_{\\mathrm{obs}})$ (ms$^{-1}$)'
# yaxis = 'recip_drift_over_nuobs'
# yaxis_lbl = 'nu_obs / drift'
if type(markers) == list:
markers = itertools.cycle(markers)
if type(fitlines) == list:
fitlines = itertools.cycle(fitlines)
ax = frames[0].plot.scatter(x='tau_w_ms', y=yaxis,
xerr=errorfunc(frames[0])[0],
yerr=errorfunc(frames[0])[1],
figsize=figsize, s=markersize, c='color', colorbar=False, fontsize=fontsize,
logy=logscale, logx=logscale, marker=next(markers), edgecolors='k',
label=labels[0])
for frame, lbl in zip(frames[1:], labels[1:]):
frame.plot.scatter(ax=ax, x='tau_w_ms', y=yaxis,
xerr=errorfunc(frame)[0],
yerr=errorfunc(frame)[1],
figsize=figsize, s=markersize, c='color', colorbar=False, fontsize=fontsize,
logy=logscale, logx=logscale, marker=next(markers), edgecolors='k',
label=lbl)
if type(annotatei) == int:
annotatei =[annotatei]
for ai in annotatei:
if ai < len(frames):
for k, v in frames[ai].iterrows():
if v[yaxis] > 0 or not logscale:
ax.annotate(k, (v['tau_w_ms'], v[yaxis]), xytext=(-3,5),
textcoords='offset points', weight='bold', size=annotsize)
alldata = | pd.concat([f for f in frames]) | pandas.concat |
import numpy as np
import pandas as pd
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
# %%
class Screen:
def __init__(self,
nrows,
ncols,
board="dummy",
alter_rows=True,
):
# The two main dataframes that you'll use:
self.leds = | pd.DataFrame() | pandas.DataFrame |
import random
import unittest
import numpy as np
import pandas as pd
from haychecker.chc.metrics import deduplication
class TestDeduplication(unittest.TestCase):
def test_singlecolumns_empty(self):
df = pd.DataFrame()
df["c1"] = []
df["c2"] = []
r1, r2 = deduplication(["c1", "c2"], df)
self.assertEqual(r1, 100.)
self.assertEqual(r2, 100.)
def test_wholetable_empty(self):
df = pd.DataFrame()
df["c1"] = []
df["c2"] = []
r = deduplication(df=df)[0]
self.assertEqual(r, 100.)
def test_singlecolumns_allsame(self):
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], | pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) | pandas.Index |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 16:30:38 2019
input/output operation.
@author: zoharslong
"""
from base64 import b64encode, b64decode
from numpy import ndarray as typ_np_ndarray
from pandas.core.series import Series as typ_pd_Series # the Series type
from pandas.core.frame import DataFrame as typ_pd_DataFrame # the DataFrame type
from pandas.core.indexes.base import Index as typ_pd_Index # the DataFrame.columns type
from pandas.core.indexes.range import RangeIndex as typ_pd_RangeIndex # the DataFrame.index type
from pandas.core.groupby.generic import DataFrameGroupBy as typ_pd_DataFrameGroupBy # the DataFrame.groupby type
from pandas import DataFrame as pd_DataFrame, read_csv, read_excel, concat, ExcelWriter
from time import sleep
from datetime import timedelta as typ_dt_timedelta
from os import listdir, makedirs
from tempfile import gettempdir # used to locate fake_useragent's local temp file
from os.path import join as os_join, exists as os_exists
from openpyxl import load_workbook # used to preserve the existing sheets when saving into an excel file
from fake_useragent import UserAgent, VERSION as fku_version # FakeUserAgentError,
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from pymysql import connect, IntegrityError
from urllib3.util.retry import MaxRetryError
from urllib3.response import ProtocolError
from urllib3.connection import NewConnectionError
from requests.models import ChunkedEncodingError
from requests.adapters import ProxyError
from requests import post, get, TooManyRedirects, ReadTimeout
from re import findall as re_find, sub as re_sub
from random import randint
from json import loads, JSONDecodeError
from pyzohar.sub_slt_bsc.bsz import stz, lsz, dcz, dtz
# from socket import getfqdn, gethostname # get the local IP # from telnetlib import Telnet # a second way to check proxy ip validity
class ioBsc(pd_DataFrame):
"""
I/O basic
ioBsc.lcn in {
'fld','fls',
'mng','mdb','cln',
'sql','sdb','tbl',
'url'/'url_lst'/'url_ctt','url_htp',
'hdr','pst','prm',
        'prx', 'prx_tms',
'ppc':{
'key': [],
'ndx': [],
},
}
"""
lst_typ_dts = [
str,
stz,
list,
lsz,
dict,
dcz,
tuple,
bytes,
typ_np_ndarray,
typ_pd_DataFrame,
typ_pd_Series,
typ_pd_Index,
typ_pd_RangeIndex,
typ_pd_DataFrameGroupBy,
type(None)
] # data sets' type
lst_typ_lcn = [list, lsz, dict, dcz, type(None)] # io methods' type
def __init__(self, dts=None, lcn=None, *, spr=False):
        # all the i/o operations have the same attributes for locating target data: location and collection
        super().__init__() # do not pre-load dts into the DataFrame
        self.__dts, self._dts, self.typ = None, None, None # holds the data
        self.len, self.clm, self.hdr, self.tal = None, None, None, None
        self.kys, self.vls = None, None
        self.__lcn, self.iot = None, None # holds the connection info
self._mySql, self._mySdb, self._myTbl = None, None, None
self._myMng, self._myMdb, self._myCln = None, None, None
self.__init_rst(dts, lcn, spr=spr)
def __init_rst(self, dts=None, lcn=None, *, spr=False):
"""
private reset initiation.
:param dts: a data set to input or output
:return: None
"""
try:
self.dts = dts.copy() if self.dts is None and dts is not None else []
except AttributeError:
self.dts = dts if self.dts is None and dts is not None else []
self.lcn = lcn.copy() if self.lcn is None and lcn is not None else {}
if spr:
self.spr_nit()
def spr_nit(self, rtn=False):
"""
super initiation.
:param rtn: default False
:return:
"""
try:
super(ioBsc, self).__init__(self.__dts)
except ValueError:
print('info: %s cannot convert to DataFrame.' % (str(self.__dts)[:8]+'..'))
if rtn:
return self
def __str__(self):
"""
print(io path).
:return: None
"""
dct_prn = {i: self.lcn[i] for i in self.lcn.keys() if i in ['fls', 'cln', 'tbl', 'url']}
return '<io: %s; ds: %s>' % (str(dct_prn), self.typ)
__repr__ = __str__ # 调用类名的输出与print(className)相同
@property
def dts(self):
"""
@property get & set lsz.seq.
:return: lsz.seq
"""
return self.__dts
@dts.setter
def dts(self, dts):
"""
self.dts = dts
:param dts: a dataset to import.
:return: None
"""
if dts is None or type(dts) in self.lst_typ_dts:
try:
self.__dts = dts.copy() if dts is not None else dts
except AttributeError:
self.__dts = dts
self.__attr_rst('dts')
else:
raise TypeError('info: dts\'s type %s is not available.' % type(dts))
def set_dts(self, dts, *, ndx_rst=True, ndx_lvl=None):
"""
        if you do not want to reset the index after setting the data set, use self.set_dts() instead of self.dts
:param dts: data set to fill self.dts
:param ndx_rst: if reset data set's index or not, default True
:param ndx_lvl: DataFrame.reset_index(level=prm), default None
:return: None
"""
if dts is None or type(dts) in self.lst_typ_dts:
try:
self.__dts = dts.copy() if dts is not None else dts
except AttributeError:
self.__dts = dts
self.__attr_rst('dts', ndx_rst=ndx_rst, ndx_lvl=ndx_lvl)
else:
raise TypeError('info: dts\'s type %s is not available.' % type(dts))
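    # e.g. (illustrative): io.set_dts(df, ndx_rst=False) keeps the original index of df,
    # whereas io.dts = df goes through the setter above and resets the index by default.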
@property
def lcn(self):
"""
self.location.
:return: self.__lcn
"""
return self.__lcn
@lcn.setter
def lcn(self, lcn):
"""
set self.__lcn in self.lcn.
:param lcn: a dict of params for self
:return: None
"""
if type(lcn) in self.lst_typ_lcn:
            if self.__lcn is None: # when self.__lcn is empty, assign lcn to it directly
                self.__lcn = lcn
            elif type(lcn) in [dict]: # when self.__lcn already holds values, update it with lcn
                self.__lcn.update(lcn) # update self.__lcn via update(), which requires self.__lcn to be a dict
self.__attr_rst('lcn')
else:
raise TypeError('info: lcn\'s type %s is not available.' % type(lcn))
def mng_nit(self):
"""
        if the io type is mongodb, reset the mongo attributes _myMng, _myMdb, _myCln.
:return: None
"""
if 'mng' not in self.lcn.keys():
self.lcn['mng'] = None
self.lcn['mng'] = "mongodb://localhost:27017" if not self.lcn['mng'] else self.lcn['mng']
self._myMng = MongoClient(host=self.lcn['mng'])
self._myMdb = self._myMng[self.lcn['mdb']] if [True if 'mdb' in self.lcn.keys() else False] else None
self._myCln = self._myMdb[self.lcn['cln']] if [True if 'cln' in self.lcn.keys() else False] else None
def sql_nit(self):
"""
        SQL initiate. needs self.lcn = {'sql': {'hst', 'prt', 'usr', 'psw'}, 'sdb': ..., 'tbl': ...}
:return: None
"""
if 'sql' not in self.lcn.keys():
self.lcn['sql'] = None
self.lcn['sql'] = {'hst': '172.16.0.13', 'prt': 3306, 'usr': None, 'psw': None} if \
not self.lcn['sql'] else self.lcn['sql']
self._mySql = self.lcn['sql'] if [True if 'sql' in self.lcn.keys() else False] else None
self._mySdb = self.lcn['sdb'] if [True if 'sdb' in self.lcn.keys() else False] else None
self._myTbl = self.lcn['tbl'] if [True if 'tbl' in self.lcn.keys() else False] else None
def api_nit(self):
"""
API initiate. needs self.lcn={'url'/'url_lst'/'url_ctt','pst','hdr','prx','prm'}
:return:
"""
        # check whether the local fakeUserAgent file exists; if not, create it automatically
if 'fake_useragent_' + fku_version + '.json' not in listdir(gettempdir()):
fku = get('https://fake-useragent.herokuapp.com/browsers/' + fku_version, timeout=180)
with open(os_join(gettempdir(), 'fake_useragent_' + fku_version + '.json'), "w") as wrt:
wrt.write(fku.text)
if 'pst' not in self.lcn.keys():
            self.lcn['pst'] = None # the parameter data sent in the request body (data) of a POST request
        if 'hdr' not in self.lcn.keys():
            self.lcn['hdr'] = {'User-Agent': UserAgent(use_cache_server=False).random} # if no request header was specified, just make one up on the spot
        else:
            self.lcn['hdr'].update({'User-Agent': UserAgent(use_cache_server=False).random}) # if one was specified, automatically refresh the fake user agent once
        if 'prx' not in self.lcn.keys():
            self.lcn['prx'] = None # whether to use a proxy
        if 'prm' not in self.lcn.keys():
            self.lcn['prm'] = None # the parameters appended to the url in a GET request
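        # Sketch of how these keys are meant to be consumed downstream (assumed; the actual
        # request code lives outside this excerpt):
        #   get(self.lcn['url'], headers=self.lcn['hdr'], params=self.lcn['prm'],
        #       proxies=self.lcn['prx'], timeout=30)                              # GET
        #   post(self.lcn['url'], data=self.lcn['pst'], headers=self.lcn['hdr'])  # POST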
def dts_nit(self, ndx_rst=True, ndx_lvl=None):
"""
        initiate the data set: generate the attributes typ, len, kys, vls, clm, hdr, tal, and optionally reset the index.
:param ndx_rst: if reset index or not, default True
:param ndx_lvl: if reset index, set the level of index
:return: None
"""
lst_xcp = []
try:
self.typ = type(self.__dts)
except TypeError:
lst_xcp.append('type')
try:
self.len = self.dts.__len__()
except AttributeError:
lst_xcp.append('len')
try:
self.kys = self.dts.keys()
except (AttributeError, TypeError):
lst_xcp.append('keys')
try:
self.vls = self.dts.values()
except (AttributeError, TypeError):
lst_xcp.append('values')
if self.typ in [typ_pd_DataFrame]:
self.clm = self.dts.columns
self.hdr = self.dts.head()
self.tal = self.dts.tail()
self.hdr = self.dts[:5] if self.typ in [list] else self.hdr
try:
if ndx_rst:
self.dts.reset_index(drop=True, inplace=True, level=ndx_lvl)
except AttributeError:
lst_xcp.append('resetIndex')
        if lst_xcp:
            print('info: %s is not available for %s.' % (str(lst_xcp), str(self.__dts)[:8] + '..'))
def lcn_nit(self, prn=False):
"""
        initiate the location info: fill self.iot with values from ['lcl','mng','sql','api'] for [local, mongodb, sql, api].
:return: None
"""
self.iot = []
if [True for i in self.lcn.keys() if i in ['fld']] == [True]:
self.iot.append('lcl')
if [True for i in self.lcn.keys() if i in ['sdb']] == [True]:
self.iot.append('sql')
if [True for i in self.lcn.keys() if i in ['mdb']] == [True]:
self.iot.append('mng')
if set([True for i in self.lcn.keys() if re_find('url', i)]) in [{True}]:
self.iot.append('api')
if not self.iot and prn:
print(' info: <.lcn: %s> is not available.' % self.lcn)
def __attr_rst(self, typ=None, *, ndx_rst=True, ndx_lvl=None):
"""
reset attributes lsz.typ.
:param typ: type of attributes resets, in ['dts','lcn'] for data set reset and location reset
:return: None
"""
if typ in ['dts', None]:
self.dts_nit(ndx_rst, ndx_lvl)
if typ in ['lcn', None]:
self.lcn_nit()
if [True for i in self.iot if i in ['mng', 'mnz']]: # for special cases, reset some attributes
self.mng_nit()
if [True for i in self.iot if i in ['sql', 'sqz']]:
self.sql_nit()
if [True for i in self.iot if i in ['api', 'apz']]:
self.api_nit()
def typ_to_dtf(self, clm=None, *, spr=False, rtn=False):
"""
self.dts's type from others to dataFrame.
:param clm: define the columns' name in the final dataFrame
:param spr: super or not, default False
:param rtn: return or not, default False
:return: None if not rtn
"""
if self.typ in [typ_pd_DataFrame]:
pass
elif self.len == 0 or self.dts in [None, [], [{}]]:
self.dts = pd_DataFrame()
elif self.typ in [dict, dcz]:
self.dts = | pd_DataFrame([self.dts]) | pandas.DataFrame |
from __future__ import division
import logging
import os.path
import pandas as pd
import sys
# #find parent directory and import base (travis)
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
from .stir_functions import StirFunctions
class StirInputs(ModelSharedInputs):
"""
Input class for STIR.
"""
def __init__(self):
"""Class representing the inputs for STIR"""
super(StirInputs, self).__init__()
self.application_rate = pd.Series([], dtype="float")
self.column_height = pd.Series([], dtype="float")
self.spray_drift_fraction = pd.Series([], dtype="float")
self.direct_spray_duration = pd.Series([], dtype="float")
self.molecular_weight = pd.Series([], dtype="float")
self.vapor_pressure = pd.Series([], dtype="float")
self.avian_oral_ld50 = pd.Series([], dtype="float")
self.body_weight_assessed_bird = pd.Series([], dtype="float")
self.body_weight_tested_bird = pd.Series([], dtype="float")
self.mineau_scaling_factor = pd.Series([], dtype="float")
self.mammal_inhalation_lc50 = pd.Series([], dtype="float")
self.duration_mammal_inhalation_study = pd.Series([], dtype="float")
self.body_weight_assessed_mammal = pd.Series([], dtype="float")
self.body_weight_tested_mammal = pd.Series([], dtype="float")
self.mammal_oral_ld50 = pd.Series([], dtype="float")
class StirOutputs(object):
"""
Output class for STIR.
"""
def __init__(self):
"""Class representing the outputs for STIR"""
super(StirOutputs, self).__init__()
self.out_sat_air_conc = pd.Series([], dtype="float", name="out_sat_air_conc")
self.out_inh_rate_avian = pd.Series([], dtype="float", name="out_inh_rate_avian")
self.out_vid_avian = pd.Series([], dtype="float", name="out_vid_avian")
self.out_inh_rate_mammal = pd.Series([], dtype="float", name="out_inh_rate_mammal")
self.out_vid_mammal = pd.Series([], dtype="float", name="out_vid_mammal")
self.out_ar2 = pd.Series([], dtype="float", name="out_ar2")
self.out_air_conc = pd.Series([], dtype="float", name="out_air_conc")
self.out_sid_avian = pd.Series([], dtype="float", name="out_sid_avian")
self.out_sid_mammal = pd.Series([], dtype="float", name="out_sid_mammal")
self.out_cf = | pd.Series([], dtype="float", name="out_cf") | pandas.Series |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
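            # n x n frame of zeros with zero-padded string labels so every
            # cell renders with the same width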
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
                    # Out of max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
                    # out of vertical bounds cannot result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
        # with default settings, no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
            # when set to a value higher than max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
        df = DataFrame(
            {"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5}
        )
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
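            # minimal stand-in for IPython's get_ipython() in a qtconsole session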
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
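        # the header line carries no exponent, so ``skip`` lets the first
        # iteration pass before the check is enforced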
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
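    """Build sample object Series for formatting tests: uniform one- and
    two-character values plus ascending and descending value lengths."""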
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import (
datetime,
timedelta,
)
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
def chck_ncols(self, s):
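        # every rendered line except truncation markers and the dtype footer
        # should be padded to a single common width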
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split("\n")
lines = [
line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = Series(range(20))
# default setting no truncation even if above min_rows
assert ".." not in repr(s)
s = Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(s)
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype="int64")
s.name = "myser"
res = s.to_string(max_rows=2, name=True)
exp = "0 0\n ..\n99 99\nName: myser"
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, dtype=True)
exp = "0 0\n ..\n99 99\ndtype: int64"
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, length=True)
exp = "0 0\n ..\n99 99\nLength: 100"
assert res == exp
def test_to_string_na_rep(self):
s = Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
s = Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
s = Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = "0 0\n ..\n9 9"
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def test_to_string_empty_col(self):
# GH 13653
s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
res = s.to_string(index=False)
exp = " \n Hello\n World\n \n \nMooooo\n \n "
assert re.match(exp, res)
class TestGenericArrayFormatter:
def test_1d_array(self):
# GenericArrayFormatter is used on types for which there isn't a dedicated
# formatter. np.bool_ is one of those types.
obj = fmt.GenericArrayFormatter(np.array([True, False]))
res = obj.get_result()
assert len(res) == 2
# Results should be right-justified.
assert res[0] == " True"
assert res[1] == " False"
def test_2d_array(self):
obj = fmt.GenericArrayFormatter(np.array([[True, False], [False, True]]))
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [True, False]"
assert res[1] == " [False, True]"
def test_3d_array(self):
obj = fmt.GenericArrayFormatter(
np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
)
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [[True, True], [False, False]]"
assert res[1] == " [[False, True], [True, False]]"
def test_2d_extension_type(self):
# GH 33770
# Define a stub extension type with just enough code to run Series.__repr__()
class DtypeStub(pd.api.extensions.ExtensionDtype):
@property
def type(self):
return np.ndarray
@property
def name(self):
return "DtypeStub"
class ExtTypeStub(pd.api.extensions.ExtensionArray):
def __len__(self):
return 2
def __getitem__(self, ix):
return [ix == 1, ix == 0]
@property
def dtype(self):
return DtypeStub()
series = Series(ExtTypeStub())
res = repr(series) # This line crashed before #33770 was fixed.
expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub"
assert res == expected
def _three_digit_exp():
return f"{1.7e8:.4g}" == "1.7e+008"
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_display_precision_trailing_zeroes(self):
# Issue #20359: trimming zeros while there is no decimal point
# Happens when display precision is set to zero
with option_context("display.precision", 0):
s = Series([840.0, 4200.0])
expected_output = "0 840\n1 4200\ndtype: float64"
assert str(s) == expected_output
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with option_context("display.precision", 6):
# DataFrame example from issue #9764
d = DataFrame(
{
"col1": [
9.999e-8,
1e-7,
1.0001e-7,
2e-7,
4.999e-7,
5e-7,
5.0001e-7,
6e-7,
9.999e-7,
1e-6,
1.0001e-6,
2e-6,
4.999e-6,
5e-6,
5.0001e-6,
6e-6,
]
}
)
expected_output = {
(0, 6): " col1\n"
"0 9.999000e-08\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 6): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 8): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07\n"
"6 5.000100e-07\n"
"7 6.000000e-07",
(8, 16): " col1\n"
"8 9.999000e-07\n"
"9 1.000000e-06\n"
"10 1.000100e-06\n"
"11 2.000000e-06\n"
"12 4.999000e-06\n"
"13 5.000000e-06\n"
"14 5.000100e-06\n"
"15 6.000000e-06",
(9, 16): " col1\n"
"9 0.000001\n"
"10 0.000001\n"
"11 0.000002\n"
"12 0.000005\n"
"13 0.000005\n"
"14 0.000005\n"
"15 0.000006",
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with option_context("display.precision", 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = DataFrame({"x": [12345.6789]})
assert str(df) == " x\n0 12345.6789"
df = DataFrame({"x": [2e6]})
assert str(df) == " x\n0 2000000.0"
df = | DataFrame({"x": [12345.6789, 2e6]}) | pandas.DataFrame |
import torch
import pandas as pd
from torch.utils.data import Dataset
import h5pickle as h5py
import io
import os
import numpy as np
from sklearn import preprocessing
import yaml
class ParticleJetDataset(Dataset):
"""CMS Particle Jet dataset."""
def __init__(self, dataPath, yamlPath=None, normalize=True, filenames=None):
data_path = dataPath
if yamlPath is None:
if os.path.isfile(data_path):
                yamlConfig = self.parse_config(os.path.join(os.path.dirname(os.path.realpath(data_path)), "jet_config.yaml"))  # Default to yaml being in same dir as file
else:
                yamlConfig = self.parse_config(os.path.join(os.path.realpath(data_path), "jet_config.yaml"))  # Default to yaml in dataset dir
else:
yamlConfig = self.parse_config(yamlPath)
# List of features to use
features = yamlConfig['Inputs']
self.features_list = features
# List of labels to use
labels = yamlConfig['Labels']
self.labels_list = labels
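        # Illustrative sketch (an assumption, not from the original repo): the code below
        # only relies on the YAML config exposing "Inputs" and "Labels" lists, e.g.
        #
        #   Inputs:
        #     - j_pt
        #     - j_eta
        #   Labels:
        #     - j_g
        #     - j_q
        #
        # The feature/label names shown here are placeholders.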
columns_arr = np.array([])
features_labels_df = pd.DataFrame()
loaded_files = 0
#Check/Handle directory of files vs 1 file
if filenames is not None: #Using dataset of .h5 files split into k folds by utilities/k_fold_split.py
if os.path.isdir(data_path):
print("Directory of data files found!")
first = True
for file in os.listdir(data_path):
if (file.endswith(".h5") or file.endswith(".h5df")) and (file in filenames):
try:
print("Loading " + str(file))
self.h5File = h5py.File(os.path.join(data_path,file), 'r', libver='latest', swmr=True)
if first:
columns_arr = np.array(self.h5File['jetFeatureNames'][:]).astype(str) # slicing h5 data because otherwise it's a reference to the actual file?
first = False
this_file = pd.DataFrame(self.h5File["jets"][:], columns=columns_arr)
features_labels_df = pd.concat([features_labels_df,this_file],axis=0) #concat along axis 0 if doesn't work?
self.h5File.close()
loaded_files +=1
except Exception as e:
print("Error! Failed to load jet file " + file)
print(e)
elif os.path.isfile(data_path):
print("Single data file found!")
self.h5File = h5py.File(dataPath, 'r', libver='latest', swmr=True)
# Convert to dataframe
columns_arr = np.array(self.h5File['jetFeatureNames'][:]).astype(str) # slicing h5 data because otherwise it's a reference to the actual file?
features_labels_df = pd.DataFrame(self.h5File["jets"][:], columns=columns_arr)
else:
print("Error! path specified is a special file (socket, FIFO, device file), or isn't valid")
print("Given Path: {}".format(data_path))
else: #Using a directory full of .h5 files
if os.path.isdir(data_path):
print("Directory of data files found!")
first = True
for file in os.listdir(data_path):
if file.endswith(".h5") or file.endswith(".h5df"):
try:
print("Loading " + str(file))
self.h5File = h5py.File(os.path.join(data_path,file), 'r', libver='latest', swmr=True)
if first:
columns_arr = np.array(self.h5File['jetFeatureNames'][:]).astype(str) # slicing h5 data because otherwise it's a reference to the actual file?
first = False
this_file = | pd.DataFrame(self.h5File["jets"][:], columns=columns_arr) | pandas.DataFrame |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
{},
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
{"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
{},
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
{"true_values": ["foo"], "false_values": ["bar"]},
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
"00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166",
],
columns=["ID"],
)
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_int64_uint64_range(all_parsers, val):
# These numbers fall right inside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pandas as pd
def get_toy_data_seqclassification():
train_data = {
"sentence1": [
'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
"Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
"They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .",
"Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .",
],
"sentence2": [
'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
"Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .",
"On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .",
"Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .",
],
"label": [1, 0, 1, 0],
"idx": [0, 1, 2, 3],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"sentence1": [
"The stock rose $ 2.11 , or about 11 percent , to close Friday at $ 21.51 on the New York Stock Exchange .",
"Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The Nasdaq had a weekly gain of 17.27 , or 1.2 percent , closing at 1,520.15 on Friday .",
"The DVD-CCA then appealed to the state Supreme Court .",
],
"sentence2": [
"PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .",
"With the scandal hanging over Stewart 's company , revenue the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The tech-laced Nasdaq Composite .IXIC rallied 30.46 points , or 2.04 percent , to 1,520.15 .",
"The DVD CCA appealed that decision to the U.S. Supreme Court .",
],
"label": [1, 1, 0, 1],
"idx": [4, 5, 6, 7],
}
dev_dataset = pd.DataFrame(dev_data)
test_data = {
"sentence1": [
"That compared with $ 35.18 million , or 24 cents per share , in the year-ago period .",
"Shares of Genentech , a much larger company with several products on the market , rose more than 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won overwhelming House approval in March .",
"The Nasdaq composite index increased 10.73 , or 0.7 percent , to 1,514.77 .",
],
"sentence2": [
"Earnings were affected by a non-recurring $ 8 million tax benefit in the year-ago period .",
"Shares of Xoma fell 16 percent in early trade , while shares of Genentech , a much larger company with several products on the market , were up 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won speedy , House approval in March and was endorsed by the White House .",
"The Nasdaq Composite index , full of technology stocks , was lately up around 18 points .",
],
"label": [0, 0, 0, 0],
"idx": [8, 10, 11, 12],
}
test_dataset = pd.DataFrame(test_data)
custom_sent_keys = ["sentence1", "sentence2"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
X_test = test_dataset[custom_sent_keys]
return X_train, y_train, X_val, y_val, X_test
def get_toy_data_multiclassclassification():
train_data = {
"text": [
"i didnt feel humiliated",
"i can go from feeling so hopeless to so damned hopeful just from being around someone who cares and is awake",
"im grabbing a minute to post i feel greedy wrong",
"i am ever feeling nostalgic about the fireplace i will know that it is still on the property",
"i am feeling grouchy",
"ive been feeling a little burdened lately wasnt sure why that was",
"ive been taking or milligrams or times recommended amount and ive fallen asleep a lot faster but i also feel like so funny",
"i feel as confused about life as a teenager or as jaded as a year old man",
"i have been with petronas for years i feel that petronas has performed well and made a huge profit",
"i feel romantic too",
"i feel like i have to make the suffering i m seeing mean something",
"i do feel that running is a divine experience and that i can expect to have some type of spiritual encounter",
],
"label": [0, 0, 3, 2, 3, 0, 5, 4, 1, 2, 0, 1],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"text": [
"i think it s the easiest time of year to feel dissatisfied",
"i feel low energy i m just thirsty",
"i have immense sympathy with the general point but as a possible proto writer trying to find time to write in the corners of life and with no sign of an agent let alone a publishing contract this feels a little precious",
"i do not feel reassured anxiety is on each side",
],
"label": [3, 0, 1, 1],
}
dev_dataset = pd.DataFrame(dev_data)
custom_sent_keys = ["text"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
return X_train, y_train, X_val, y_val
def get_toy_data_multiplechoiceclassification():
train_data = {
"video-id": [
"anetv_fruimvo90vA",
"anetv_fruimvo90vA",
"anetv_fruimvo90vA",
"anetv_MldEr60j33M",
"lsmdc0049_Hannah_and_her_sisters-69438",
],
"fold-ind": ["10030", "10030", "10030", "5488", "17405"],
"startphrase": [
"A woman is seen running down a long track and jumping into a pit. The camera",
"A woman is seen running down a long track and jumping into a pit. The camera",
"A woman is seen running down a long track and jumping into a pit. The camera",
"A man in a white shirt bends over and picks up a large weight. He",
"Someone furiously shakes someone away. He",
],
"sent1": [
"A woman is seen running down a long track and jumping into a pit.",
"A woman is seen running down a long track and jumping into a pit.",
"A woman is seen running down a long track and jumping into a pit.",
"A man in a white shirt bends over and picks up a large weight.",
"Someone furiously shakes someone away.",
],
"sent2": ["The camera", "The camera", "The camera", "He", "He"],
"gold-source": ["gen", "gen", "gold", "gen", "gold"],
"ending0": [
"captures her as well as lifting weights down in place.",
"follows her spinning her body around and ends by walking down a lane.",
"watches her as she walks away and sticks her tongue out to another person.",
"lifts the weights over his head.",
"runs to a woman standing waiting.",
],
"ending1": [
"pans up to show another woman running down the track.",
"pans around the two.",
"captures her as well as lifting weights down in place.",
"also lifts it onto his chest before hanging it back out again.",
"tackles him into the passenger seat.",
],
"ending2": [
"follows her movements as the group members follow her instructions.",
"captures her as well as lifting weights down in place.",
"follows her spinning her body around and ends by walking down a lane.",
"spins around and lifts a barbell onto the floor.",
"pounds his fist against a cupboard.",
],
"ending3": [
"follows her spinning her body around and ends by walking down a lane.",
"follows her movements as the group members follow her instructions.",
"pans around the two.",
"bends down and lifts the weight over his head.",
"offers someone the cup on his elbow and strides out.",
],
"label": [1, 3, 0, 0, 2],
}
dev_data = {
"video-id": [
"lsmdc3001_21_JUMP_STREET-422",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
],
"fold-ind": ["11783", "10977", "10970", "10968"],
"startphrase": [
"Firing wildly he shoots holes through the tanker. He",
"He puts his spatula down. The Mercedes",
"He stands and looks around, his eyes finally landing on: "
"The digicam and a stack of cassettes on a shelf. Someone",
"He starts going through someone's bureau. He opens the drawer "
"in which we know someone keeps his marijuana, but he",
],
"sent1": [
"Firing wildly he shoots holes through the tanker.",
"He puts his spatula down.",
"He stands and looks around, his eyes finally landing on: "
"The digicam and a stack of cassettes on a shelf.",
"He starts going through someone's bureau.",
],
"sent2": [
"He",
"<NAME>",
"Someone",
"He opens the drawer in which we know someone keeps his marijuana, but he",
],
"gold-source": ["gold", "gold", "gold", "gold"],
"ending0": [
"overtakes the rig and falls off his bike.",
"fly open and drinks.",
"looks at someone's papers.",
"stops one down and rubs a piece of the gift out.",
],
"ending1": [
"squeezes relentlessly on the peanut jelly as well.",
"walks off followed driveway again.",
"feels around it and falls in the seat once more.",
"cuts the mangled parts.",
],
"ending2": [
"scrambles behind himself and comes in other directions.",
"slots them into a separate green.",
"sprints back from the wreck and drops onto his back.",
"hides it under his hat to watch.",
],
"ending3": [
"sweeps a explodes and knocks someone off.",
"pulls around to the drive - thru window.",
"sits at the kitchen table, staring off into space.",
"does n't discover its false bottom.",
],
"label": [0, 3, 3, 3],
}
test_data = {
"video-id": [
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
],
"fold-ind": ["10980", "10976", "10978", "10969"],
"startphrase": [
"Someone leans out of the drive - thru window, "
"grinning at her, holding bags filled with fast food. The Counter Girl",
"Someone looks up suddenly when he hears. He",
"Someone drives; someone sits beside her. They",
"He opens the drawer in which we know someone "
"keeps his marijuana, but he does n't discover"
" its false bottom. He stands and looks around, his eyes",
],
"sent1": [
"Someone leans out of the drive - thru "
"window, grinning at her, holding bags filled with fast food.",
"Someone looks up suddenly when he hears.",
"Someone drives; someone sits beside her.",
"He opens the drawer in which we know"
" someone keeps his marijuana, but he does n't discover its false bottom.",
],
"sent2": [
"The Counter Girl",
"He",
"They",
"He stands and looks around, his eyes",
],
"gold-source": ["gold", "gold", "gold", "gold"],
"ending0": [
"stands next to him, staring blankly.",
"puts his spatula down.",
"rise someone's feet up.",
"moving to the side, the houses rapidly stained.",
],
"ending1": [
"with auditorium, filmed, singers the club.",
"bumps into a revolver and drops surreptitiously into his weapon.",
"lift her and they are alarmed.",
"focused as the sight of someone making his way down a trail.",
],
"ending2": [
"attempts to block her ransacked.",
"talks using the phone and walks away for a few seconds.",
"are too involved with each other to "
"notice someone watching them from the drive - thru window.",
"finally landing on: the digicam and a stack of cassettes on a shelf.",
],
"ending3": [
"is eating solid and stinky.",
"bundles the flaxen powder beneath the car.",
"sit at a table with a beer from a table.",
"deep and continuing, its bleed - length sideburns pressing on him.",
],
"label": [0, 0, 2, 2],
}
train_dataset = pd.DataFrame(train_data)
dev_dataset = pd.DataFrame(dev_data)
test_dataset = pd.DataFrame(test_data)
custom_sent_keys = [
"sent1",
"sent2",
"ending0",
"ending1",
"ending2",
"ending3",
"gold-source",
"video-id",
"startphrase",
"fold-ind",
]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
X_test = test_dataset[custom_sent_keys]
y_test = test_dataset[label_key]
return X_train, y_train, X_val, y_val, X_test, y_test
def get_toy_data_seqregression():
train_data = {
"sentence1": [
"A plane is taking off.",
"A man is playing a large flute.",
"A man is spreading shreded cheese on a pizza.",
"Three men are playing chess.",
],
"sentence2": [
"An air plane is taking off.",
"A man is playing a flute.",
"A man is spreading shredded cheese on an uncooked pizza.",
"Two men are playing chess.",
],
"label": [5.0, 3.799999952316284, 3.799999952316284, 2.5999999046325684],
"idx": [0, 1, 2, 3],
}
train_dataset = | pd.DataFrame(train_data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout, AveragePooling2D
from keras.models import Sequential
from keras.regularizers import l2
from keras.callbacks import EarlyStopping
os.chdir('/home/ubuntu/Thesis-KNMI-FoggyGAN/')
df = | pd.read_pickle('data/raw/train_annotations.pkl') | pandas.read_pickle |
'''
Scrape Robospect output and do some processing of the results
'''
import os
import sys
import glob
import logging
import pandas as pd
import numpy as np
import matplotlib
from astropy.io.fits import getdata
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from . import *
class Scraper():
'''
Scrape all the equivalent width info from the Robospect *robolines files
'''
def __init__(self,
subdir=config_red["data_dirs"]["DIR_ROBO_OUTPUT"],
file_scraped_info=config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["SCRAPED_EW_ALL_DATA"],
orig_spec_list = config_red["data_dirs"]["DIR_SRC"] + config_red["file_names"]["LIST_SPEC_PHASE"],
verbose=False):
'''
        INPUTS:
        subdir: directory containing the Robospect *robolines output files
        file_scraped_info: name of the csv file to which the scraped EW data is written
        orig_spec_list: the file containing the original file names of the spectra
'''
# directory containing the *.fits.robolines
# files with the EW info
self.stem = '.' ## ##
# subdirectory containing the *.c.dat files
self.subdir = subdir ## ##
# get list of filenames without the path
## ## note the string being sought here is specific to RW's synthetic spectra; this is a weakness here and needs to be fixed later!
file_list_long = glob.glob(self.subdir+'/'+'*robolines')
file_list_unsorted = [os.path.basename(x) for x in file_list_long]
self.file_list = sorted(file_list_unsorted)
# read in original file names
input_list = pd.read_csv(orig_spec_list)
self.orig_spec_list = input_list["orig_spec_file_name"]
# EW info will get scraped into this
self.write_out_filename = file_scraped_info
# return tables of EW data?
self.verbose = verbose
def __call__(self):
def line_order_check(line_centers):
'''
Sanity check: are the lines listed in order?
N.b. This checks the wavelengths using the given line list
values (and not the fitted centers)
'''
logging.info('Verifying line centers...')
logging.info(line_centers[0])
            glitch_count = int(0)  # integer flag for bookkeeping
if ((line_centers[0] < 3933.660-10) or
(line_centers[0] > 3933.660+10)): # CaIIK
logging.warning('CaIIK line center does not match!')
                glitch_count = int(1)  # integer flag for bookkeeping
if ((line_centers[1] < 3970.075-10) or
(line_centers[1] > 3970.075+10)): # H-epsilon (close to CaIIH)
logging.warning('H-epsilon center (close to CaIIH) line does not match!')
                glitch_count = int(1)  # integer flag for bookkeeping
if ((line_centers[2] < 4101.7100-10) or
(line_centers[2] > 4101.7100+10)): # H-delta
logging.warning('H-delta line center does not match!')
                glitch_count = int(1)  # integer flag for bookkeeping
if ((line_centers[3] < 4340.472-10) or
(line_centers[3] > 4340.472+10)): # H-gamma
logging.warning('H-gamma line center does not match!')
                glitch_count = int(1)  # integer flag for bookkeeping
if ((line_centers[4] < 4861.290-10) or
(line_centers[4] > 4861.290+10)): # H-beta
logging.warning('H-beta line center does not match!')
                glitch_count = int(1)  # integer flag for bookkeeping
if (glitch_count == int(0)):
                logging.info('CaIIK, H-eps, H-del, H-gam, H-beta line centers are consistent')
return
df_master = pd.DataFrame() # initialize
# loop over all filenames of realizations of empirical spectra, extract line data
#import ipdb; ipdb.set_trace()
for t in range(0, len(self.file_list)):
# read in Robospect output
logging.info("--------------------")
logging.info("Reading in Robospect output from directory")
logging.info(self.subdir)
'''
The following parses lines from Robospect *robolines output files,
which look like the following, as of the v0.76 tag of Robospect:
## Units
##AA [ AA AA None] [ AA AA None] [ AA AA None] mAA mAA None None None None
## Headers
##wave_0 [ gaussianMu gaussianSigma gaussianAmp] [ uncertaintyMu uncertaintySigma uncertaintyAmp] [ priorMu priorSigma priorAmp] EQW uncertaintyEQW chiSqr flags blendGroup comment
3933.6600 [ 3933.618556 1.636451 -0.338310] [ 0.043767 0.045441 0.008054] [ 3934.427147 1.754001 0.384793] 1.387738 0.127230 0.004045 0x10020 0 CaII-K
3970.0750 [ 3969.912002 6.497202 -0.626854] [ 0.245555 0.237816 0.023196] [ 3971.262223 4.535872 0.781687] 10.208984 1.331932 0.117392 0x10020 0 H-eps
4101.7100 [ 4101.728498 6.829899 -0.596311] [ 0.335244 0.327236 0.025288] [ 4102.885050 4.878668 0.734648] 10.208852 1.637334 0.220112 0x10020 0 H-del
4340.4720 [ 4340.374387 7.365172 -0.557777] [ 0.395447 0.378434 0.025443] [ 4340.943149 4.961159 0.689719] 10.297539 1.773505 0.300238 0x10020 0 H-gam
4861.2900 [ 4861.316520 7.570797 -0.505060] [ 0.441626 0.426212 0.025690] [ 4861.746895 4.898021 0.635582] 9.584604 1.822847 0.377350 0x10020 0 H-beta
'''
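            # The first 19 header lines (the ## Units / ## Headers blocks shown above) are
            # skipped on read; the literal "[" tokens become throwaway columns ("[1", "[2",
            # "[3") that are dropped immediately after parsing.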
df = pd.read_csv(self.subdir+'/'+self.file_list[t],
skiprows=19,
delim_whitespace=True,
index_col=False,
usecols=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],
names= ["wavel_stated_center","[1","wavel_found_center","gaussianSigma","gaussianAmp",
"[2","uncertaintyMu","uncertaintySigma","uncertaintyAmp",
"[3","priorMu","priorSigma","priorAmp","EQW","uncertaintyEQW",
"chiSqr","flags","blendGroup","line_name"])
# remove dummy columns
df = df.drop(columns=["[1","[2","[3"])
# remove Robospect delimiter strings from columns and cast contents as floats
logging.info("Parsing " + self.file_list[t])
try:
# this will fail if there are infs in the EWs
df["gaussianAmp"] = df["gaussianAmp"].str.replace("]","")
df["gaussianAmp"] = df["gaussianAmp"].astype(np.float)
df["uncertaintyAmp"] = df["uncertaintyAmp"].str.replace("]","")
df["uncertaintyAmp"] = df["uncertaintyAmp"].astype(np.float)
df["priorAmp"] = df["priorAmp"].str.replace("]","")
df["priorAmp"] = df["priorAmp"].astype(np.float)
except:
# skip this file
logging.error("Parsing error! " + self.file_list[t])
continue
# check lines are in the right order
# if they are not, a warning is printed in the log
line_order_check(df['wavel_found_center'])
# add two cols on the left: the filename, and the name of the line
#s_length = len(df['mean']) # number of lines (should be 5)
# file names
df['robolines_file_name'] = pd.Series(self.file_list[t],
index=df.index)
# names of empirical spectra realizations (multiple ones
# correspond to one empirical spectrum)
# remove .robolines extension
df['realization_spec_file_name'] = pd.Series(self.file_list[t].split(".robolines")[0],
index=df.index)
# names of the absorption lines
df['line_name'] = ['CaIIK', 'Heps', 'Hdel', 'Hgam', 'Hbet']
# print progress
logging.info('Out of '+str(len(self.file_list))+' files, '+str(t+1)+' scraped...')
# if this is the first list, start a master copy from it to concatenate stuff to it
if (t == 0):
df_master = df.copy()
else:
df_master = pd.concat([df_master, df])
del df # clear variable
# write to csv, while resetting the indices
# note THIS TABLE INCLUDES ALL DATA, GOOD AND BAD
#df_master_reset = df_master.reset_index(drop=True).copy()
# this is effectively the same, but gets written out
df_master.reset_index(drop=True).to_csv(self.write_out_filename,index=False)
logging.info("Table of ALL EW info written to " + str(self.write_out_filename))
#if self.verbose:
# return df_master_reset, df_master_reset_drop_bad_spectra
return
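# Minimal usage sketch (illustrative; the default paths come from config_red):
#
#   scraper = Scraper(subdir="robo_output/", file_scraped_info="ew_all_data.csv")
#   scraper()   # scrape every *robolines file and write the combined EW table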
def add_synthetic_meta_data(input_list = config_red["data_dirs"]["DIR_SRC"] + config_red["file_names"]["LIST_SPEC_PHASE"],
read_in_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["RESTACKED_EW_DATA_W_NET_BALMER_ERRORS"],
write_out_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["RESTACKED_EW_DATA_W_METADATA"]):
'''
For the generation of a calibration, this reads in a file with spectrum file
names and other info like Fe/H, and adds everything to the table with EWs
INPUTS:
input_list: file name of list containing original spectrum names and meta-data
read_in_filename: file name of table containing EW data including Balmer lines and their errors
write_out_filename: file name with everything together to write out
'''
# read in metadata
input_data_arr = pd.read_csv(input_list)
# read in EW data
all_data = pd.read_csv(read_in_filename)
# add rows of meta-data table to EW data table, based on matchings of original spectrum file names
combined_data = all_data.merge(input_data_arr,how="left",on="orig_spec_file_name")
# write out
combined_data.to_csv(write_out_filename,index=False)
logging.info("Table of EW info with meta-data written to " + str(write_out_filename))
return
def quality_check(
read_in_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["SCRAPED_EW_ALL_DATA"],
write_out_filename = config_red["data_dirs"]["DIR_EW_PRODS"]+config_red["file_names"]["SCRAPED_EW_DATA_GOOD_ONLY"]):
'''
This reads in all the scraped EW data in raw form, removes spectra that have fits
which are bad based on multiple criteria, and writes out another data_table
INPUTS:
read_in_filename: file name of the table with ALL scraped data from Robospect
write_out_filename: file name of the table with spectra with any bad line fits removed
'''
# read in data
all_data = | pd.read_csv(read_in_filename) | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np
import pickle
from constants import *
import time
import json
import sys
import os
import utils
import base64
import generate_tf_record
import pandas as pd
import lightgbm as lgb
from datetime import datetime
import math
from collections import defaultdict
EVAL_NUM = 5
def cal_IDCG(n):
assert(n >= 1)
res = 0
for i in range(1, n + 1):
res += 1 / math.log(i+1)
return res
def cal_DCG(hit, k=EVAL_NUM):
assert(len(hit) == k)
res = 0
for idx, h in enumerate(hit):
res += h / math.log(idx + 2)
return res
IDCG = {}
for i in range(1, EVAL_NUM + 1):
IDCG[i] = cal_IDCG(i)
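# Quick sanity check of the NDCG@5 pieces above (values approximate; math.log is natural log):
# for hit = [1, 0, 1, 0, 0], cal_DCG(hit) = 1/ln(2) + 1/ln(4) ≈ 2.164; with two relevant items,
# IDCG[2] = 1/ln(2) + 1/ln(3) ≈ 2.353, so NDCG ≈ 2.164 / 2.353 ≈ 0.92.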
def create_sample(features,qidpid_vecs,wv):
features_id = [features[0], features[5], features[1],features[6], features[7], features[8], features[9], features[3], features[4]]
qid_pid_vec = qidpid_vecs[(features[0], features[5])]
features_vec = np.concatenate([np.array([wv[i] for i in features[2]]).mean(axis=0), \
np.array(features[10]).reshape(-1,4).mean(axis=0), np.array(features[11]).reshape(-1,2048).mean(axis=0),\
np.array(qid_pid_vec[0]),
qid_pid_vec[1],qid_pid_vec[2],
qid_pid_vec[3],qid_pid_vec[4]
])
return features_id,features_vec
def create_dataframe(example):
columns1 = ["origin_query_id","origin_product_id","query_id","product_id","image_h","image_w","num_boxes","words_len","lastword"]
feas = [i[0] for i in example]
fea_vecs = [i[1] for i in example]
columns2 = ["vec{}".format(i) for i in range(fea_vecs[0].shape[0])]
df1 = pd.DataFrame(np.stack(feas),columns=columns1)
df2 = pd.DataFrame(np.stack(fea_vecs),columns=columns2)
df = | pd.concat([df1,df2],axis=1) | pandas.concat |
import sys
import traceback
import json
from copy import deepcopy
from uuid import uuid1
from datetime import datetime, timedelta
from time import sleep
from threading import Thread
from multiprocessing.dummy import Pool
from typing import Dict
import pandas as pd
from vnpy.event import Event
from vnpy.rpc import RpcClient
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
BarData,
ContractData,
SubscribeRequest,
CancelRequest,
OrderRequest
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_TRADE,
EVENT_ORDER,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG)
from vnpy.trader.constant import Exchange, Product
from vnpy.amqp.consumer import subscriber
from vnpy.amqp.producer import task_creator
from vnpy.data.tdx.tdx_common import get_stock_type_sz, get_stock_type_sh
STOCK_CONFIG_FILE = 'tdx_stock_config.pkb2'
from pytdx.hq import TdxHq_API
# TongDaXin (TDX) stock market data
from vnpy.data.tdx.tdx_common import get_cache_config, get_tdx_market_code
from vnpy.trader.utility import get_stock_exchange
from pytdx.config.hosts import hq_hosts
from pytdx.params import TDXParams
class StockRpcGateway(BaseGateway):
"""
股票交易得RPC接口
交易使用RPC实现,
行情1:
使用RabbitMQ订阅获取
需要启动单独得进程运行stock_tick_publisher
Cta_Stock => 行情订阅 =》StockRpcGateway =》RabbitMQ (task)=》 stock_tick_publisher =》订阅(worker)
stock_tick_publisher => restful接口获取股票行情 =》RabbitMQ(pub) => StockRpcGateway =>on_tick event
行情2:
使用tdx进行bar订阅
"""
default_setting = {
"主动请求地址": "tcp://127.0.0.1:2014",
"推送订阅地址": "tcp://127.0.0.1:4102",
"远程接口名称": "pb01"
}
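    # The setting keys above are kept verbatim because they are looked up as dict keys in
    # connect(): 主动请求地址 = RPC request address, 推送订阅地址 = publish/subscribe address,
    # 远程接口名称 = remote gateway name.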
exchanges = list(Exchange)
def __init__(self, event_engine, gateway_name='StockRPC'):
"""Constructor"""
super().__init__(event_engine, gateway_name)
self.symbol_gateway_map = {}
self.client = RpcClient()
self.client.callback = self.client_callback
self.rabbit_api = None
self.tdx_api = None
self.rabbit_dict = {}
        # gateway_name used on the remote RPC side
self.remote_gw_name = gateway_name
def connect(self, setting: dict):
""""""
req_address = setting["主动请求地址"]
pub_address = setting["推送订阅地址"]
self.remote_gw_name = setting['远程接口名称']
self.write_log(f'请求地址:{req_address},订阅地址:{pub_address},远程接口:{self.remote_gw_name}')
        # subscribe to all events (empty topic prefix)
self.client.subscribe_topic("")
# self.client.subscribe_topic(EVENT_TRADE)
# self.client.subscribe_topic(EVENT_ORDER)
# self.client.subscribe_topic(EVENT_POSITION)
# self.client.subscribe_topic(EVENT_ACCOUNT)
# self.client.subscribe_topic(EVENT_CONTRACT)
# self.client.subscribe_topic(EVENT_LOG)
self.client.start(req_address, pub_address)
self.status.update({"con":True})
self.rabbit_dict = setting.get('rabbit', {})
if len(self.rabbit_dict) > 0:
self.write_log(f'激活RabbitMQ行情接口.配置:\n{self.rabbit_dict}')
self.rabbit_api = SubMdApi(gateway=self)
self.rabbit_api.connect(self.rabbit_dict)
else:
self.write_log(f'激活tdx行情订阅接口')
self.tdx_api = TdxMdApi(gateway=self)
self.tdx_api.connect()
self.write_log("服务器连接成功,开始初始化查询")
self.query_all()
def check_status(self):
if self.client:
pass
if self.rabbit_api:
self.rabbit_api.check_status()
return True
def subscribe(self, req: SubscribeRequest):
"""行情订阅"""
if self.tdx_api:
self.tdx_api.subscribe(req)
return
self.write_log(f'创建订阅任务=> rabbitMQ')
host = self.rabbit_dict.get('host', 'localhost')
port = self.rabbit_dict.get('port', 5672)
user = self.rabbit_dict.get('user', 'admin')
password = self.rabbit_dict.get('password', '<PASSWORD>')
exchange = 'x_work_queue'
queue_name = 'subscribe_task_queue'
routing_key = 'stock_subscribe'
task = task_creator(
host=host,
port=port,
user=user,
password=password,
exchange=exchange,
queue_name=queue_name,
routing_key=routing_key)
mission = {}
mission.update({'id': str(uuid1())})
mission.update({'action': "subscribe"})
mission.update({'vt_symbol': req.vt_symbol})
mission.update({'is_stock': True})
msg = json.dumps(mission)
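        # Resulting task payload, e.g. (the vt_symbol shown is only an illustrative value):
        # {"id": "<uuid1 string>", "action": "subscribe", "vt_symbol": "600036.SSE", "is_stock": true}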
self.write_log(f'[=>{host}:{port}/{exchange}/{queue_name}/{routing_key}] create task :{msg}')
task.pub(msg)
task.close()
# gateway_name = self.symbol_gateway_map.get(req.vt_symbol, "")
# self.client.subscribe(req, gateway_name)
if self.rabbit_api:
self.rabbit_api.registed_symbol_set.add(req.vt_symbol)
def send_order(self, req: OrderRequest):
"""
        Send an order through the remote RPC gateway
:param req:
:return:
"""
self.write_log(f'使用prc委托:{req.__dict__}')
ref = self.client.send_order(req, self.remote_gw_name)
local_ref = ref.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
self.write_log(f'委托返回:{ref}=> {local_ref}')
return local_ref
def cancel_order(self, req: CancelRequest):
""""""
self.write_log(f'委托撤单:{req.__dict__}')
# gateway_name = self.symbol_gateway_map.get(req.vt_symbol, "")
self.client.cancel_order(req, self.remote_gw_name)
def query_account(self):
""""""
pass
def query_position(self):
""""""
pass
def query_all(self):
""""""
contracts = self.client.get_all_contracts()
for contract in contracts:
self.symbol_gateway_map[contract.vt_symbol] = contract.gateway_name
contract.gateway_name = self.gateway_name
self.on_contract(contract)
self.write_log("合约信息查询成功")
accounts = self.client.get_all_accounts()
for account in accounts:
account.gateway_name = self.gateway_name
self.on_account(account)
self.write_log("资金信息查询成功")
positions = self.client.get_all_positions()
for position in positions:
position.gateway_name = self.gateway_name
            # replace the gateway prefix of vt_positionid
position.vt_positionid = position.vt_positionid.replace(f'{position.gateway_name}.',
f'{self.gateway_name}.')
            # replace the gateway prefix of vt_accountid
position.vt_accountid = position.vt_accountid.replace(f'{position.gateway_name}.', f'{self.gateway_name}.')
self.on_position(position)
self.write_log("持仓信息查询成功")
orders = self.client.get_all_orders()
for order in orders:
            # switch to this gateway's name
order.gateway_name = self.gateway_name
            # replace the gateway prefix of vt_orderid
order.vt_orderid = order.vt_orderid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
            # replace the gateway prefix of vt_accountid
order.vt_accountid = order.vt_accountid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
self.on_order(order)
self.write_log("委托信息查询成功")
trades = self.client.get_all_trades()
for trade in trades:
trade.gateway_name = self.gateway_name
            # replace the gateway prefix of vt_orderid
            trade.vt_orderid = trade.vt_orderid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
            # replace the gateway prefix of vt_tradeid
            trade.vt_tradeid = trade.vt_tradeid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
            # replace the gateway prefix of vt_accountid
trade.vt_accountid = trade.vt_accountid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
self.on_trade(trade)
self.write_log("成交信息查询成功")
def close(self):
""""""
self.client.stop()
self.client.join()
def client_callback(self, topic: str, event: Event):
""""""
if event is None:
print("none event", topic, event)
return
if event.type == EVENT_TICK:
return
event = deepcopy(event)
data = event.data
if hasattr(data, "gateway_name"):
data.gateway_name = self.gateway_name
if hasattr(data, 'vt_orderid'):
rpc_vt_orderid = data.vt_orderid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
self.write_log(f' vt_orderid :{data.vt_orderid} => {rpc_vt_orderid}')
data.vt_orderid = rpc_vt_orderid
if hasattr(data, 'vt_tradeid'):
rpc_vt_tradeid = data.vt_tradeid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
self.write_log(f' vt_tradeid :{data.vt_tradeid} => {rpc_vt_tradeid}')
data.vt_tradeid = rpc_vt_tradeid
if hasattr(data, 'vt_accountid'):
data.vt_accountid = data.vt_accountid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
if hasattr(data, 'vt_positionid'):
data.vt_positionid = data.vt_positionid.replace(f'{self.remote_gw_name}.', f'{self.gateway_name}.')
if event.type in [EVENT_ORDER, EVENT_TRADE]:
self.write_log(f'{self.remote_gw_name} => {self.gateway_name} event:{data.__dict__}')
self.event_engine.put(event)
# symbol <=> Chinese name
symbol_name_map: Dict[str, str] = {}
# symbol <=> exchange
symbol_exchange_map: Dict[str, Exchange] = {}
class TdxMdApi(object):
"""通达信行情和基础数据"""
def __init__(self, gateway: StockRpcGateway):
""""""
super().__init__()
self.gateway: StockRpcGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.connect_status: bool = False
self.login_status: bool = False
        self.req_interval = 0.5  # interval between requests: 500 ms
        self.req_id = 0  # request sequence number
        self.connection_status = False  # connection status
        self.symbol_exchange_dict = {}  # tdx symbol -> vn exchange
        self.symbol_market_dict = {}  # tdx symbol -> tdx market id
        self.symbol_vn_dict = {}  # tdx symbol -> vtSymbol
        self.symbol_bar_dict = {}  # tdx symbol -> last bar
self.registed_symbol_set = set()
self.config = get_cache_config(STOCK_CONFIG_FILE)
self.symbol_dict = self.config.get('symbol_dict', {})
        # best server IP address
self.best_ip = self.config.get('best_ip', {})
        # IPs excluded after failures
self.exclude_ips = self.config.get('exclude_ips', [])
        # time of the last server selection
self.select_time = self.config.get('select_time', datetime.now() - timedelta(days=7))
        # cache timestamp
self.cache_time = self.config.get('cache_time', datetime.now() - timedelta(days=7))
self.commission_dict = {}
self.contract_dict = {}
        # self.queue = Queue()  # request queue
        self.pool = None  # thread pool
        # self.req_thread = None  # timer thread
# copy.copy(hq_hosts)
self.ip_list = [{'ip': "192.168.127.12", 'port': 7709},
{'ip': "192.168.127.12", 'port': 7709},
{'ip': "172.16.58.3", 'port': 80},
{'ip': "172.16.58.3", 'port': 7709},
{'ip': "192.168.3.11", 'port': 7709},
{'ip': "172.16.31.10", 'port': 80},
{'ip': "172.16.31.10", 'port': 7709},
{'ip': "172.16.17.32", 'port': 7709},
{'ip': "192.168.3.11", 'port': 7709},
{'ip': "192.168.127.12", 'port': 7709},
{'ip': "172.16.58.3", 'port': 7709},
{'ip': "172.16.31.10", 'port': 7709},
{'ip': "172.16.31.10", 'port': 7709},
# {'ip': "192.168.3.11", 'port': 7709},
{'ip': "172.16.58.3", 'port': 7709},
{'ip': "172.16.58.3", 'port': 7709},
{'ip': "172.16.58.3", 'port': 7709},
# {'ip': '172.16.31.10', 'port': 7709},
# {'ip': '172.16.17.32', 'port': 7709}
]
self.best_ip = {'ip': None, 'port': None}
        self.api_dict = {}  # API connection/session objects
        self.last_bar_dt = {}  # last (closing) bar time per contract
        self.last_api_bar_dict = {}  # last bar time per session
self.security_count = 50000
        # list of stock codes and names
self.stock_codelist = None
def ping(self, ip, port=7709):
"""
        Ping a market data server and return the round-trip time.
:param ip:
:param port:
:param type_:
:return:
"""
apix = TdxHq_API()
__time1 = datetime.now()
try:
with apix.connect(ip, port):
                if apix.get_security_count(TDXParams.MARKET_SZ) > 9000:  # market 0 = Shenzhen; ~9260 securities
_timestamp = datetime.now() - __time1
self.gateway.write_log('服务器{}:{},耗时:{}'.format(ip, port, _timestamp))
return _timestamp
else:
self.gateway.write_log(u'该服务器IP {}无响应'.format(ip))
return timedelta(9, 9, 0)
except:
self.gateway.write_error(u'tdx ping服务器,异常的响应{}'.format(ip))
return timedelta(9, 9, 0)
def select_best_ip(self, ip_list, proxy_ip="", proxy_port=0, exclude_ips=[]):
"""
        Select the fastest IP.
:param ip_list:
        :param proxy_ip: proxy host
        :param proxy_port: proxy port
        :param exclude_ips: list of IPs to exclude
:return:
"""
from pytdx.util.best_ip import ping
data = [ping(ip=x['ip'], port=x['port'], type_='stock', proxy_ip=proxy_ip, proxy_port=proxy_port) for x in
ip_list if x['ip'] not in exclude_ips]
results = []
for i in range(len(data)):
            # drop entries whose ping failed
if data[i] < timedelta(0, 9, 0):
results.append((data[i], ip_list[i]))
else:
if ip_list[i].get('ip') not in self.exclude_ips:
self.exclude_ips.append(ip_list[i].get('ip'))
        # sort by ping time, ascending
results = [x[1] for x in sorted(results, key=lambda x: x[0])]
return results[0]
def connect(self, n=3):
"""
        Connect to the TDX market data servers.
:param n:
:return:
"""
if self.connection_status:
            for api in self.api_dict.values():  # iterate over the API objects, not the dict keys
if api is not None or getattr(api, "client", None) is not None:
self.gateway.write_log(u'当前已经连接,不需要重新连接')
return
self.gateway.write_log(u'开始通达信行情服务器')
if len(self.symbol_dict) == 0:
self.gateway.write_error(f'本地没有股票信息的缓存配置文件')
else:
self.cov_contracts()
        # pick the best server
if self.best_ip['ip'] is None and self.best_ip['port'] is None:
self.best_ip = self.select_best_ip(ip_list=self.ip_list,
proxy_ip="",
proxy_port=0,
exclude_ips=self.exclude_ips)
        # create n API connection instances
for i in range(n):
try:
api = TdxHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
                # probe the market security count to verify the connection
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
self.last_bar_dt[i] = datetime.now()
self.connection_status = True
self.security_count = c
# if len(symbol_name_map) == 0:
# self.get_stock_list()
except Exception as ex:
self.gateway.write_error(u'连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
self.gateway.status.update({"tdx_status":False, "tdx_error":str(ex)})
return
        # create the connection pool; each connection runs self.run
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
        # update the gateway-level connection status
self.gateway.status.update({"tdx_con":True, 'tdx_con_time':datetime.now().strftime('%H:%M:%S')})
def reconnect(self, i):
"""
        Reconnect session i to the best available server.
:param i:
:return:
"""
try:
self.best_ip = self.select_best_ip(ip_list=self.ip_list, exclude_ips=self.exclude_ips)
api = TdxHq_API(heartbeat=True, auto_retry=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
            # probe the market security count to verify the connection
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'重新创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
sleep(1)
except Exception as ex:
self.gateway.write_error(u'重新连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
self.gateway.status.update({"tdx_status":False, "tdx_error":str(ex)})
return
def close(self):
"""退出API"""
self.connection_status = False
        # update the gateway-level connection status
self.gateway.status.update({'tdx_con':False})
if self.pool is not None:
self.pool.close()
self.pool.join()
def subscribe(self, req):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
vn_symbol = str(req.symbol)
if '.' in vn_symbol:
vn_symbol = vn_symbol.split('.')[0]
self.gateway.write_log(u'通达信行情订阅 {}'.format(str(vn_symbol)))
tdx_symbol = vn_symbol # [0:-2] + 'L9'
tdx_symbol = tdx_symbol.upper()
self.gateway.write_log(u'{}=>{}'.format(vn_symbol, tdx_symbol))
self.symbol_vn_dict[tdx_symbol] = vn_symbol
if tdx_symbol not in self.registed_symbol_set:
self.registed_symbol_set.add(tdx_symbol)
            # query the stock's reference information
self.qry_instrument(vn_symbol)
self.check_status()
def check_status(self):
"""
        Monitor the health of the TDX market data connections.
:return:
"""
self.gateway.write_log(u'检查tdx接口状态')
try:
            # total number of subscribed symbols
self.gateway.status.update({"tdx_symbols_count":len(self.registed_symbol_set)})
dt_now = datetime.now()
if len(self.registed_symbol_set) > 0 and '0935' < dt_now.strftime("%H%M") < '1500':
                # if the connection has not been started (or has gone stale), (re)start it
over_time = [((dt_now - dt).total_seconds() > 60) for dt in self.last_api_bar_dict.values()]
if not self.connection_status or len(self.api_dict) == 0 or any(over_time):
self.gateway.write_log(u'tdx还没有启动连接,就启动连接')
self.close()
self.pool = None
self.api_dict = {}
pool_cout = getattr(self.gateway, 'tdx_pool_count', 3)
self.connect(pool_cout)
api_bar_times = [f'{k}:{v.hour}:{v.minute}' for k,v in self.last_api_bar_dict.items()]
if len(api_bar_times) > 0:
self.gateway.status.update({"tdx_api_dt":api_bar_times,'tdx_status':True})
#self.gateway.write_log(u'tdx接口状态正常')
except Exception as ex:
msg = f'检查tdx接口时异常:{str(ex)}' + traceback.format_exc()
self.gateway.write_error(msg)
def qry_instrument(self, symbol):
"""
        Query/update stock reference information.
:return:
"""
if not self.connection_status:
return
api = self.api_dict.get(0)
if api is None:
self.gateway.write_log(u'取不到api连接,更新合约信息失败')
return
        # TODO: fetch the stock's Chinese name
market_code = get_tdx_market_code(symbol)
api.to_df(api.get_finance_info(market_code, symbol))
        # if there are pre-registered contracts, subscribe to them in advance
# if len(all_contacts) > 0:
# cur_folder = os.path.dirname(__file__)
# export_file = os.path.join(cur_folder,'contracts.csv')
# if not os.path.exists(export_file):
# df = pd.DataFrame(all_contacts)
# df.to_csv(export_file)
def cov_contracts(self):
"""转换本地缓存=》合约信息推送"""
for symbol_marketid, info in self.symbol_dict.items():
symbol, market_id = symbol_marketid.split('_')
exchange = info.get('exchange', '')
if len(exchange) == 0:
continue
vn_exchange_str = get_stock_exchange(symbol)
            # skip TDX index codes
if exchange != vn_exchange_str:
continue
exchange = Exchange(exchange)
if info['stock_type'] == 'stock_cn':
product = Product.EQUITY
elif info['stock_type'] in ['bond_cn', 'cb_cn']:
product = Product.BOND
elif info['stock_type'] == 'index_cn':
product = Product.INDEX
elif info['stock_type'] == 'etf_cn':
product = Product.ETF
else:
product = Product.EQUITY
volume_tick = info['volunit']
if symbol.startswith('688'):
volume_tick = 200
contract = ContractData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=exchange,
name=info['name'],
product=product,
pricetick=round(0.1 ** info['decimal_point'], info['decimal_point']),
size=1,
min_volume=volume_tick,
margin_rate=1
)
if product != Product.INDEX:
                # cache: symbol => Chinese name
symbol_name_map.update({contract.symbol: contract.name})
                # cache the symbol-to-exchange mapping
symbol_exchange_map[contract.symbol] = contract.exchange
self.contract_dict.update({contract.symbol: contract})
self.contract_dict.update({contract.vt_symbol: contract})
            # push the contract event
self.gateway.on_contract(contract)
def get_stock_list(self):
"""股票所有的code&name列表"""
api = self.api_dict.get(0)
if api is None:
self.gateway.write_log(u'取不到api连接,更新合约信息失败')
return None
self.gateway.write_log(f'查询所有的股票信息')
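        # Page through the security list of both markets (j=0: Shenzhen 'sz', j=1: Shanghai 'sh')
        # in blocks of 1000 codes and stack everything into one DataFrame indexed by (code, sse).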
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh').set_index(
['code', 'sse'], drop=False) for i in range(int(api.get_security_count(j) / 1000) + 1)], axis=0) for j
in range(2)], axis=0)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(get_stock_type_sz))
sh = sh.assign(sec=sh.code.apply(get_stock_type_sh))
temp_df = | pd.concat([sz, sh]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# Reads in photometry from different sources, normalizes them, and puts them
# onto a BJD time scale
# Created 2021 Dec. 28 by E.S.
import numpy as np
import pandas as pd
from astropy.time import Time
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
file_name_photometry_input = "./all_photometry_program_stars/polished/vx_her_aavso_polished.txt"
period_input = 0.45535897 # based on NDL's phase-folding of AAVSO data
# read in photometry
df_test2 = pd.read_csv(file_name_photometry_input)
# phase-folded data
df_test2["epoch_start_zero"] = np.subtract(df_test2["BJD"],np.min(df_test2["BJD"]))
df_test2["baseline_div_period"] = np.divide(df_test2["epoch_start_zero"],period_input)
df_phase_folded = | pd.DataFrame(data = [t%1. for t in df_test2["baseline_div_period"]], columns=["phase"]) | pandas.DataFrame |
from pandas import DataFrame
from random import SystemRandom
def prepare_cards(num_decks=8):
"""
Prepare decks
:return: List of shuffled cards as integers, J, Q, K are represented by
11, 12, 13, respectively.
"""
sys_rand = SystemRandom()
# Init 8 decks
cards = [i for i in range(1, 14)]
cards = cards * 4 * num_decks
total_cards = 13 * 4 * num_decks
# Shuffle cards
shuffle_order = [sys_rand.randrange(0, total_cards - i) for i in range(total_cards)]
cards_shuffled = [cards.pop(pos) for pos in shuffle_order]
# Get them out
return cards_shuffled
def play_game(cards, num_cards_to_discard=0):
"""
Play mini-baccarat game according to the rules, and return string as
'P', 'B', or 'T' depending on who the winner is.
:param list cards:
:param int num_cards_to_discard:
:return: String 'P' if player wins, 'B' if bank wins, or 'T' for tie.
"""
if num_cards_to_discard:
cards = cards[num_cards_to_discard:]
card = cards.pop()
player_points = card if card < 10 else 0
card = cards.pop()
bank_points = card if card < 10 else 0
card = cards.pop()
player_points = (card if card < 10 else 0) + player_points
if player_points >= 10:
player_points -= 10
card = cards.pop()
bank_points = (card if card < 10 else 0) + bank_points
if bank_points >= 10:
bank_points -= 10
# Naturals (8 or 9) get evaluated immediately
if player_points >= 8 or bank_points >= 8:
if player_points > bank_points:
return 'P'
if bank_points > player_points:
return 'B'
return 'T'
# By default bank does not draw a 3rd card.
bank_draw = False
# Player stands on 6 or 7
if player_points >= 6:
if bank_points <= 5:
# Bank will draw on 5 or less if player stands.
bank_draw = True
else:
# Player draws
card = cards.pop()
player_points = (card if card < 10 else 0) + player_points
if player_points > 10:
player_points -= 10
if bank_points < 3:
# Bank will always draw on 0, 1, 2
bank_draw = True
elif bank_points == 7:
bank_draw = False
elif bank_points == 3:
bank_draw = card != 8
elif bank_points == 4:
bank_draw = card in [2, 3, 4, 5, 6, 7]
elif bank_points == 5:
bank_draw = card in [4, 5, 6, 7]
elif bank_points == 6:
bank_draw = card in [6, 7]
if bank_draw:
card = cards.pop()
bank_points = (card if card < 10 else 0) + bank_points
if bank_points >= 10:
bank_points -= 10
if player_points > bank_points:
return 'P'
if bank_points > player_points:
return 'B'
return 'T'
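# Illustrative usage sketch (added for documentation; not part of the original
# script). The helper below is an assumption: it deals hands from one shuffled
# shoe with play_game() and tallies the outcomes.
def _example_shoe_tally(num_decks=8):
    shoe = prepare_cards(num_decks)
    tally = {'P': 0, 'B': 0, 'T': 0}
    # A single hand consumes at most 6 cards, so stop before the shoe runs out.
    while len(shoe) > 6:
        tally[play_game(shoe)] += 1
    return tally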
def play_games(num_games, num_decks_in_shoe=8, decks_discarded=2, num_cards_to_discard=0, results_to_track_min=5, results_to_track_max=8):
"""
Play multiple games and print statistics.
:param int num_games: Number of games to play.
:param int num_decks_in_shoe: Number of decks to use.
:param int decks_discarded: Number of decks to cut, at which time reshuffle will happen.
:param int num_cards_to_discard: Number of cards to discard per game at start of dealing.
:param int results_to_track_min: Min length of results to track.
:param int results_to_track_max: Max length of results to track.
:return:
"""
cards = prepare_cards(num_decks_in_shoe)
# Placeholder to record result stats
result_signatures = {}
for i in range(results_to_track_min, results_to_track_max + 1):
result_signatures[i] = ''
games_played_for_result_signatures = {}
player_wins_for_result_signatures = {}
bank_wins_for_result_signatures = {}
ties_for_result_signatures = {}
# Play games
while num_games > 0:
num_games -= 1
# Re-shuffle
if len(cards) <= decks_discarded * 13 * 4:
result_signatures = {}
for i in range(results_to_track_min, results_to_track_max + 1):
result_signatures[i] = ''
cards = prepare_cards(num_decks_in_shoe)
# Play game
result = play_game(cards, num_cards_to_discard)
# Record data
for i in range(results_to_track_min, results_to_track_max + 1):
if len(result_signatures[i]) == i:
# Result signature is full and can be used.
result_signature = result_signatures[i]
games_played_for_result_signatures[result_signature] = games_played_for_result_signatures.get(
result_signature, 0) + 1
if result == 'P':
player_wins_for_result_signatures[result_signature] = player_wins_for_result_signatures.get(
result_signature, 0) + 1
elif result == 'B':
bank_wins_for_result_signatures[result_signature] = bank_wins_for_result_signatures.get(
result_signature, 0) + 1
else:
ties_for_result_signatures[result_signature] = ties_for_result_signatures.get(
result_signature, 0) + 1
# Update result signature for next iteration
result_signatures[i] += result
if len(result_signatures[i]) > i:
result_signatures[i] = result_signatures[i][1:]
# Analyze recorded data
data = {}
titles = ['Played', 'PWR', 'BWR', 'TR']
labels = []
for k in games_played_for_result_signatures:
games_played = games_played_for_result_signatures.get(k, 0)
# A small result is going to be statistically insignificant.
# TODO: Implement formula for variance based on num_games
# TODO: Auto set results_to_track based on statistical significance
if games_played < 50000:
continue
player_wins = player_wins_for_result_signatures.get(k, 0)
bank_wins = bank_wins_for_result_signatures.get(k, 0)
ties = ties_for_result_signatures.get(k, 0)
player_win_ratio = round(player_wins / games_played * 100, 2)
bank_win_ratio = round(bank_wins / games_played * 100, 2)
tie_ratio = round(ties / games_played * 100, 2)
# You can win at least $1.6 from player or tie on betting $100.
if player_win_ratio > 50.8 or bank_win_ratio > 53 or tie_ratio > 12.7:
labels.append(k)
data[k] = [games_played, player_win_ratio, bank_win_ratio, tie_ratio]
# Print results
    df = DataFrame(data=data, index=titles)
"""正式版"""
import pandas as pd
from pyecharts.chart import Chart
from pyecharts.option import get_all_options
def values2keys(dict):
"""取出任意字典中所有值升序排序后对应的所有键的列表形式"""
temp_list = []
temp_dict = {}
for k, v in dict.items():
temp_dict.setdefault(v, []).append(k)
for i in sorted(list(temp_dict.keys())):
for j in temp_dict[i]:
temp_list.append(j)
return temp_list
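# For example (illustration added here): values2keys({'a': 3, 'b': 1, 'c': 2})
# returns ['b', 'c', 'a'], i.e. the keys ordered by ascending value.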
class Bar(Chart):
"""
    <<< Dynamic bar / column chart >>>
    A bar/column chart represents the magnitude of data through column height / bar width.
"""
def __init__(self, title="", subtitle="", **kwargs):
super(Bar, self).__init__(title, subtitle, **kwargs)
def add(self, *args, **kwargs):
self.__add(*args, **kwargs)
def __add(self, name, x_axis, y_axis, is_stack=False, bar_category_gap="20%", **kwargs):
"""
:param name:
            Series name, used for tooltip display and legend filtering.
        :param x_axis:
            Data for the x axis.
        :param y_axis:
            Data for the y axis.
        :param is_stack:
            Whether to stack the data; series on the same category axis that share the same stack value are stacked. Defaults to False.
:param kwargs:
"""
        # Assert that x and y data have the same length (one-to-one correspondence)
assert len(x_axis) == len(y_axis)
kwargs.update(x_axis=x_axis)
chart = get_all_options(**kwargs)
if is_stack:
is_stack = "stack_" + str(self._option['series_id'])
else:
is_stack = ""
xaxis, yaxis = chart['xy_axis']
self._option.update(xAxis=xaxis, yAxis=yaxis)
self._option.get('legend')[0].get('data').append(name)
self._option.get('series').append({
"type": "bar",
"name": name,
"data": y_axis,
"stack": is_stack,
"barCategoryGap": bar_category_gap,
"label": chart['label'],
"markPoint": chart['mark_point'],
"markLine": chart['mark_line'],
"seriesId": self._option.get('series_id'),
})
self._config_components(**kwargs)
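# Usage sketch (illustrative assumption, not part of the original script; assumes the
# pyecharts Chart base class provides render()):
#   bar = Bar("Net waist difference by height")
#   bar.add("waist diff", x_axis=list(x), y_axis=list(y), is_stack=False)
#   bar.render("bar.html")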
def screen():
"""
    Determine the x-axis category and the filter conditions through a hierarchical menu.
    :return: the selected conditions x_category, category_list, type_list
"""
    # Determine the filter conditions through hierarchical selection
category_dict = {'1': '体型', '2': '身高', '3': '体重', '4': '腹型'}
    # Loop until the category shown on the x axis is chosen
x_id = input("请输入x轴坐标ID{}: ".format(category_dict))
while x_id not in category_dict.keys():
print("输入有误, 请重新输入!")
x_id = input("请输入x轴坐标ID{}: ".format(category_dict))
x_category = category_dict[x_id]
    # Loop to collect the filter conditions
category_list = []
type_list = []
while category_dict:
        # First-level selection
c_id = input("请输入一级分类ID{}: ".format(category_dict))
if c_id in category_dict.keys():
            # Second-level selection
            # If the first-level category is height or weight, filter by a numeric range
if c_id in ['2', '3']:
type = input("请输入{}范围(最小值, 最大值, 以空格隔开): ".format(category_dict[c_id]))
            # If the first-level category is body type or abdomen type, filter by second-level categories
else:
type = input("请输入{}二级分类(以空格隔开): ".format(category_dict[c_id])).upper()
            type_li = type.split()  # split the string on spaces to build a list of conditions
type_list.append(type_li)
category_list.append(category_dict[c_id])
category_dict.pop(c_id)
        # The Enter key returns an empty string, which ends the loop
elif c_id == "":
break
else:
print("输入有误, 请重新输入!")
print("x轴坐标: {}".format(x_category))
print("一级分类: {}".format(category_list))
print("二级分类: {}".format(type_list))
return x_category, category_list, type_list
def extract(data, fields, x_category, category_list, type_list):
"""
    Based on the selected conditions, extract the matching data and build pandas Series structures.
    :param data: data read from the Excel sheet
    :param fields: list of field names
    :param x_category: x-axis category
    :param category_list: list of selected first-level categories
    :param type_list: list of selected second-level categories
:return: x, y
"""
    jywc_series = data[15][1:]  # Series of net waist-difference values
    id_series = data[0][1:]  # Series of IDs
    id_list = id_series.values  # list of all IDs
    x_column = fields.index(x_category)  # column index of the chosen x-axis field
    x_data = data[x_column][1:]  # Series for the x axis, selected by column index
    temp_dict = dict(zip(id_series.values, x_data.values))  # dict mapping id -> x-axis value
for i in range(len(category_list)): # i = 0;1;2;3
        column = fields.index(category_list[i])  # column index of the category field
        series = data[column][1:]  # Series of that column
m = []
        # If the first-level category is height or weight, iterate over the value range
if category_list[i] in ['身高', '体重']:
            # If height/weight is given as a range
if len(type_list[i]) == 2:
                type_min = int(type_list[i][0])  # minimum of the range
                type_max = int(type_list[i][1])  # maximum of the range
                for j in range(type_min, type_max + 1):
                    type = j
                    n = series[series.values == type].index  # IDs matching this category value
                    # Same first-level category, different second-level values: take the union
                    m = list(set(m).union(set(n)))  # union across sub-categories of the same category
else:
                # If height/weight is given as specific values
                for j in type_list[i]:
                    type = int(j)
                    n = series[series.values == type].index  # IDs matching this category value
                    m = list(set(m).union(set(n)))  # union across sub-categories of the same category
elif category_list[i] == '体型':
            # If the first-level category is body type, iterate over the second-level values, appending '体'
            for j in type_list[i]:
                type = j + '体'
                n = series[series.values == type].index  # IDs matching this category value
                m = list(set(m).union(set(n)))  # union across sub-categories of the same category
else:
            # If the first-level category is abdomen type, iterate over the second-level values
            for j in type_list[i]:
                type = j
                n = series[series.values == type].index  # IDs matching this category value
                m = list(set(m).union(set(n)))  # union across sub-categories of the same category
        # Different first-level categories: take the intersection
        id_list = list(set(id_list).intersection(set(m)))  # intersection of id_list and m
    # Check the filtered result
id_list.sort()
if id_list:
print("筛选后的id列表: {}".format(id_list))
else:
print("当前筛选条件没有对应数据, 请重新筛选!")
    x_dict = dict(zip(id_list, [temp_dict[i] for i in id_list]))  # x-axis data after filtering
    l = values2keys(x_dict)  # IDs corresponding to the filtered x-axis values, sorted ascending
    x_list = [str(temp_dict[i]) + "({})".format(i) for i in l]  # final x-axis tick labels
    y_list = [(round(i, 2)) for i in jywc_series[l]]  # net waist difference for the selected IDs, rounded to 2 decimals
    # Build the pandas Series structures
x = pd.Series(x_list)
y = pd.Series(y_list)
return x, y
if __name__ == "__main__":
    data = pd.read_excel("test_data.xlsx", header=None)
# Author: Group 404
# Date: 2020-01-23
#
"""Reads in raw csv data and performs the necessary wrangling and transformations.
Usage: src/EDA.py --path_in=<path_in> --path_out=<path_out>
Options:
--path_in=<path_in> Path (including filename) of where to read source data
--path_out=<path_out> Path (excluding filename) of where to locally write the file
"""
import numpy as np
import pandas as pd
import altair as alt
# from pandas_profiling import ProfileReport
from sklearn.model_selection import train_test_split
from docopt import docopt
import requests
import os
#alt.data_transformers.enable('json')
#alt.renderers.enable('notebook')
opt = docopt(__doc__)
def EDA(path_in, path_out):
try:
assert(type(path_in) == str)
except:
print("Input path should be a string")
try:
assert(type(path_out) == str)
except:
print("Input path should be a string")
data = pd.read_csv(path_in)
# split into training/validation and testing set
train, test = train_test_split(data, test_size=0.45)
train.head()
# Basic description of every column present in the dataframe
    pd.DataFrame.describe(train)
#!/usr/bin/env python
import os
import json
import pandas as pd
import xarray as xr
import abc
from typing import Tuple
from tqdm import tqdm
import numpy as np
from icecube.utils.common_utils import (
measure_time,
NumpyEncoder,
assert_metadata_exists,
)
from icecube.utils.logger import Logger
from icecube.bin.sar_cube.sar_datacube_metadata import SARDatacubeMetadata
from icecube.bin.datacube_variables import NAME_BAND
from icecube.utils.logger import Logger
logger = Logger(os.path.basename(__file__))
class LabelsDatacube:
"""
Core class for creating labels cube
"""
def __init__(self):
super().__init__()
self.json_labels = None
self.mask_datatype = None
self.max_shape_azimuth = None
self.max_shape_range = None
@measure_time
def create(self, product_type: str, labels_fpath: str, raster_dir: str):
"""
main method of class to create labels cube
:param product_type: type of product, GRD/SLC
:param labels_fpath: path/to/file.json containing icecube formatted labels
:param raster_dir: path/to/dir containing rasters
"""
metadata_object = SARDatacubeMetadata(self.cube_config)
metadata_object = metadata_object.compute_metdatadf_from_folder(
raster_dir, product_type
)
assert_metadata_exists(metadata_object.metadata_df)
self.json_labels = self.read_json(labels_fpath)
metadata_df = self.replace_unlabelled_bands_by_NaNs(metadata_object.metadata_df)
self.mask_datatype = self.get_mask_dtype(metadata_df)
(
self.max_shape_azimuth,
self.max_shape_range,
) = metadata_object.get_master_shape()
self.xrdataset = self.create_by_metadata(metadata_df)
return self
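    # Usage sketch (illustrative assumption; in practice a concrete subclass that
    # defines cube_config, get_mask_dtype, etc. is instantiated):
    #   cube = SomeLabelsDatacube().create("GRD", "labels.json", "rasters/")
    #   cube.xrdataset   # xarray.Dataset holding one label layer per acquisition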
def create_by_metadata(self, metadata_df: pd.DataFrame):
"""
method to create labels cube using SARDatacubeMetadata object
:param metadata_df: dataframe object containing metadata for rasters in the directory
"""
list_metadata = []
xdataset_seq = []
for i, (df_index, df_row) in enumerate(
tqdm(
metadata_df.iterrows(),
total=metadata_df.shape[0],
desc="processing rasters for labels cube",
)
):
# We don't have image for this timestamp - we create an empty array to cover this date.
if pd.isnull(df_row["product_fpath"]):
dummy_xdataset, dummy_metadata = self.compute_dummy_xrdataset()
xdataset_seq.append(dummy_xdataset)
list_metadata.append(dummy_metadata)
# We do have images and we will fetch the relevant labels for that
else:
# Get the full path
logger.debug(
"Working on {}".format(os.path.basename(df_row["product_fpath"]))
)
product_file = os.path.basename(df_row["product_fpath"])
asset_labels = self.get_product_labels_from_json(product_file)
label_xdataset, label_metadata = self.compute_layer_xrdataset(
asset_labels, product_file
)
list_metadata.append(label_metadata)
xdataset_seq.append(label_xdataset)
# Add TIME coordinates to the datacube as well.
metadata_df[NAME_BAND] = metadata_df["acquisition_date"]
ds = xr.concat(
xdataset_seq,
dim=pd.to_datetime(metadata_df[NAME_BAND]),
data_vars="all",
combine_attrs="drop",
)
super_dict = self.concat_metadata(list_metadata)
# Update attrs for each Datavariable within the datacube
for dv in list(ds.data_vars):
ds[dv].attrs = super_dict
return ds
def concat_metadata(self, list_metadata: list):
"""
Concatenate metadata as list of keys
where keys are superset of dict keys from individual product-files
:param list_metadata: metadata list for each product file to be concatenated in labels cube
"""
possible_keys = {
k for cur_metdata in list_metadata for k, v in cur_metdata.items()
}
super_dict = {possible_key: [] for possible_key in possible_keys}
# fill the metada dict.
for cur_key in possible_keys:
for cur_metdata in list_metadata:
# The image metadata contains the specific keyword.
if cur_key in cur_metdata:
# Transform to string as numpy array cannot be saved as netCDF format
cur_value = cur_metdata[cur_key]
stringified_value = NumpyEncoder.encode(cur_value)
super_dict[cur_key].append(stringified_value)
else:
super_dict[cur_key].append("None")
return super_dict
def replace_unlabelled_bands_by_NaNs(
self, metadata_df: pd.DataFrame
) -> pd.DataFrame:
"""
A user can only provide labels for certain bands in the cube. In such a case, all unlabelled
bands/rasters metadata fields are replaced with NaNs.
Please note that "acquisition_date" columns are retained as values are used for xarray COORDs
:param metadata_df: dataframe object containing metadata for rasters in the directory
returns pd.df with NaNs filled for unavailable rows
"""
json_products = [json_dict["product_file"] for json_dict in self.json_labels]
for indx, row in metadata_df.iterrows():
            if pd.isnull(row["product_fpath"]):
from enum import Enum
from typing import List
import numpy as np
import pandas as pd
class AggregationMode(str, Enum):
"""Enum for different aggregation modes."""
mean = "mean"
max = "max"
min = "min"
median = "median"
AGGREGATION_FN = {
AggregationMode.mean: np.mean,
AggregationMode.max: np.max,
AggregationMode.min: np.min,
AggregationMode.median: np.median,
}
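# For instance, AGGREGATION_FN[AggregationMode.mean]([1.0, 2.0, 3.0]) evaluates to
# 2.0; mrmr() below uses these callables to collapse per-segment relevance and
# redundancy values into a single number (example added for illustration).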
def mrmr(
relevance_table: pd.DataFrame,
regressors: pd.DataFrame,
top_k: int,
relevance_aggregation_mode: str = AggregationMode.mean,
redundancy_aggregation_mode: str = AggregationMode.mean,
atol: float = 1e-10,
) -> List[str]:
"""
Maximum Relevance and Minimum Redundancy feature selection method.
Here relevance for each regressor is calculated as the per-segment aggregation of the relevance
values in relevance_table. The redundancy term for the regressor is calculated as a mean absolute correlation
between this regressor and other ones. The correlation between the two regressors is an aggregated pairwise
correlation for the regressors values in each segment.
Parameters
----------
relevance_table:
dataframe of shape n_segment x n_exog_series with relevance table, where ``relevance_table[i][j]``
contains relevance of j-th ``df_exog`` series to i-th df series
regressors:
dataframe with regressors in etna format
top_k:
num of regressors to select; if there are not enough regressors, then all will be selected
relevance_aggregation_mode:
the method for relevance values per-segment aggregation
redundancy_aggregation_mode:
the method for redundancy values per-segment aggregation
atol:
the absolute tolerance to compare the float values
Returns
-------
selected_features: List[str]
list of ``top_k`` selected regressors, sorted by their importance
"""
relevance_aggregation_fn = AGGREGATION_FN[AggregationMode(relevance_aggregation_mode)]
redundancy_aggregation_fn = AGGREGATION_FN[AggregationMode(redundancy_aggregation_mode)]
relevance = relevance_table.apply(relevance_aggregation_fn).fillna(0)
all_features = relevance.index.to_list()
selected_features: List[str] = []
not_selected_features = all_features.copy()
redundancy_table = pd.DataFrame(np.inf, index=all_features, columns=all_features)
top_k = min(top_k, len(all_features))
for i in range(top_k):
score_numerator = relevance.loc[not_selected_features]
        score_denominator = pd.Series(1, index=not_selected_features)
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
        rng = date_range('1/1/2000', '2/29/2000')
import os, sys, logging, warnings, time
import osmnx
import networkx as nx
import pandas as pd
import geopandas as gpd
import numpy as np
from shapely.geometry import Point
from .core import pandana_snap
from .core import calculate_OD as calc_od
def calculateOD_gdf(G, origins, destinations, fail_value=-1, weight="time", calculate_snap=False, wgs84 = {'init':'epsg:4326'}):
''' Calculate Origin destination matrix from GeoDataframes
Args:
G (networkx graph): describes the road network. Often extracted using OSMNX
origins (geopandas dataframe): source locations for calculating access
destinations (geopandas dataframe): destination locations for calculating access
        calculate_snap (boolean, optional): whether to add the snapping distance to the travel time; default is False
        wgs84 (CRS dictionary, optional): CRS of the road network to which the GDFs are projected
Returns:
        numpy array: 2d OD matrix with rows indexed by the origins and columns indexed by the destinations
'''
#Get a list of originNodes and destinationNodes
if origins.crs != wgs84:
origins = origins.to_crs(wgs84)
if destinations.crs != wgs84:
destinations = destinations.to_crs(wgs84)
origins = pandana_snap(G, origins)
destinations = pandana_snap(G, destinations)
oNodes = origins['NN'].unique()
dNodes = destinations['NN'].unique()
od = calc_od(G, oNodes, dNodes, fail_value)
origins['OD_O'] = origins['NN'].apply(lambda x: np.where(oNodes==x)[0][0])
destinations['OD_D'] = destinations['NN'].apply(lambda x: np.where(dNodes==x)[0][0])
outputMatrix = od[origins['OD_O'].values,:][:,destinations['OD_D'].values]
if calculate_snap:
originsUTM = pandana_snap(G, origins, target_crs='epsg:3857')
destinationsUTM = pandana_snap(G, destinations, target_crs='epsg:3857')
originsUTM['tTime_sec'] = originsUTM['NN_dist'] / 1000 / 5 * 60 * 60 # Convert snap distance to walking time in seconds
destinationsUTM['tTime_sec'] = destinationsUTM['NN_dist'] / 1000 / 5 * 60 * 60 # Convert snap distance to walking time in seconds
originsUTM.reset_index(inplace=True)
for idx, row in originsUTM.iterrows():
outputMatrix[idx,:] = outputMatrix[idx,:] + row['tTime_sec']
outputMatrix = outputMatrix
return(outputMatrix)
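# Minimal usage sketch (added for illustration; file names, the place name and the
# presence of a 'time' edge attribute on G are assumptions, not part of this module):
#   import osmnx as ox
#   G = ox.graph_from_place("Kigali, Rwanda", network_type="drive")
#   origins = gpd.read_file("origins.geojson")
#   destinations = gpd.read_file("health_facilities.geojson")
#   od = calculateOD_gdf(G, origins, destinations, fail_value=-1, weight="time")
#   # od[i, j] is the travel cost from origins.iloc[i] to destinations.iloc[j]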
def calculateOD_csv(G, originCSV, destinationCSV='', oLat="Lat", oLon="Lon", dLat="Lat", dLon="Lon", crs={'init':'epsg:4326'}, fail_value=-1, weight='time', calculate_snap=False):
"""
Calculate OD matrix from csv files of points
:param G: describes the road network. Often extracted using OSMNX
:param string origins: path to csv file with locations for calculating access
:param string destinations: path to csv with destination locations for calculating access
:param string oLat:
:param string oLon:
:param string dLat:
:param string dLon:
:param dict crs: crs of input origins and destinations, defaults to {'init':'epsg:4326'}
    :param int fail_value: value to put in the OD matrix if no route is found, defaults to -1
:param string weight: variable in G used to define edge impedance, defaults to 'time'
:param bool calculate_snap: variable to add snapping distance to travel time, default is false
    :returns: numpy array: 2d OD matrix with rows indexed by the origins and columns indexed by the destinations
"""
    originPts = pd.read_csv(originCSV)
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 14:05, 28/01/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
### Reading all results files to find True pareto-fronts (Reference Fronts)
from time import time
from pathlib import Path
from copy import deepcopy
from config import Config, OptExp, OptParas
from pandas import read_csv, DataFrame, to_numeric
from numpy import array, zeros, vstack, hstack, min, max, mean, std
from utils.io_util import load_tasks, load_nodes
from utils.metric_util import *
from utils.visual.scatter import visualize_front_3d
def inside_loop(my_model, n_trials, n_timebound, epoch, fe, end_paras):
for pop_size in OptExp.POP_SIZE:
if Config.TIME_BOUND_KEY:
path_results = f'{Config.RESULTS_DATA}/{n_timebound}s/task_{my_model["problem"]["n_tasks"]}/{Config.METRICS}/{my_model["name"]}/{n_trials}'
else:
path_results = f'{Config.RESULTS_DATA}/no_time_bound/task_{my_model["problem"]["n_tasks"]}/{Config.METRICS}/{my_model["name"]}/{n_trials}'
name_paras = f'{epoch}_{pop_size}_{end_paras}'
file_name = f'{path_results}/experiment_results/{name_paras}-results.csv'
df = read_csv(file_name, usecols=["Power", "Latency", "Cost"])
return df.values
def getting_results_for_task(models):
matrix_fit = zeros((1, 6))
for n_task in OptExp.N_TASKS:
for my_model in models:
tasks = load_tasks(f'{Config.INPUT_DATA}/tasks_{n_task}.json')
problem = deepcopy(my_model['problem'])
problem["tasks"] = tasks
problem["n_tasks"] = n_task
problem["shape"] = [len(problem["clouds"]) + len(problem["fogs"]), n_task]
my_model['problem'] = problem
for n_trials in range(OptExp.N_TRIALS):
if Config.TIME_BOUND_KEY:
for n_timebound in OptExp.TIME_BOUND_VALUES:
if Config.MODE == "epoch":
for epoch in OptExp.EPOCH:
end_paras = f"{epoch}"
df_matrix = inside_loop(my_model, n_trials, n_timebound, epoch, None, end_paras)
df_name = array([[n_task, my_model["name"], n_trials], ] * len(df_matrix))
                                matrix = hstack((df_name, df_matrix))
matrix_fit = vstack((matrix_fit, matrix))
else:
if Config.MODE == "epoch":
for epoch in OptExp.EPOCH:
end_paras = f"{epoch}"
df_matrix = inside_loop(my_model, n_trials, None, epoch, None, end_paras)
df_name = array([[n_task, my_model["name"], n_trials], ] * len(df_matrix))
matrix = hstack((df_name, df_matrix))
matrix_fit = vstack((matrix_fit, matrix))
return matrix_fit[1:]
starttime = time()
clouds, fogs, peers = load_nodes(f'{Config.INPUT_DATA}/nodes_2_8_5.json')
problem = {
"clouds": clouds,
"fogs": fogs,
"peers": peers,
"n_clouds": len(clouds),
"n_fogs": len(fogs),
"n_peers": len(peers),
}
models = [
{"name": "NSGA-II", "class": "BaseNSGA_II", "param_grid": OptParas.NSGA_II, "problem": problem},
{"name": "NSGA-III", "class": "BaseNSGA_III", "param_grid": OptParas.NSGA_III, "problem": problem},
{"name": "MO-ALO", "class": "BaseMO_ALO", "param_grid": OptParas.MO_ALO, "problem": problem},
{"name": "MO-SSA", "class": "BaseMO_SSA", "param_grid": OptParas.MO_SSA, "problem": problem},
]
## Load all results of all trials
matrix_results = getting_results_for_task(models)
# df_full = DataFrame(matrix_results, columns=["Task", "Model", "Trial", "Fit1", "Fit2", "Fit3"])
data = {'Task': matrix_results[:, 0],
'Model': matrix_results[:, 1],
'Trial': matrix_results[:, 2],
'Fit1': matrix_results[:, 3],
'Fit2': matrix_results[:, 4],
'Fit3': matrix_results[:, 5],
}
df_full = DataFrame(data)
df_full["Task"] = to_numeric(df_full["Task"])
df_full["Trial"] = to_numeric(df_full["Trial"])
df_full["Fit1"] = to_numeric(df_full["Fit1"])
df_full["Fit2"] = | to_numeric(df_full["Fit2"]) | pandas.to_numeric |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import folium
import requests
import plotly.graph_objects as go
from sklearn.linear_model import LinearRegression
import streamlit as st
from streamlit_folium import folium_static
import streamlit.components.v1 as components
from bs4 import BeautifulSoup
import regex
with st.echo(code_location='below'):
# st.set_page_config(layout="wide")
    st.write('The goal of this project is to look at statistics on offenses '
             'in the USA over the last ten years.')
# #BLOCK1
# entrypoint = "https://api.usa.gov/crime/fbi/sapi/api/agencies"
# query = {'api_key': 'e8vEnIM7V1Msff37SGU86c4r27dVzZOUow7LFCiM'}
# r = requests.get(entrypoint, params=query)
# data = r.json()
# columns_all = ['ori', 'agency_name', 'agency_type_name', 'state_name', 'state_abbr', 'division_name', 'region_name',
# 'region_desc', 'county_name', 'nibrs', 'latitude', 'longitude', 'nibrs_start_date']
# summ_all = pd.DataFrame(columns=columns_all)
# for i in data:
# for j in data[i]:
# a = (data[i][j])
# new = []
# for k in a:
# new += [a[k]]
# summ_all.loc[len(summ_all)] = new
# print(summ_all)
summ_all = pd.read_csv("summ_all.csv")
# BLOCK2
summ_all = (summ_all).dropna()
    st.write(
        'This map shows all the agencies connected to NIBRS (the National Incident-Based Reporting System). '
        'One can see that the system is actively used in the eastern part of the country, while in the west '
        'there remain entire states in which not a single agency uses NIBRS. '
        'For example, Pennsylvania has more than 1500 agencies, yet only 25 of them use the system. ')
m = folium.Map([41.75215, -97.61819], zoom_start=4)
for ind, row in summ_all.iterrows():
folium.Circle([row.latitude, row.longitude],
radius=10, control_scale=True).add_to(m)
folium_static(m)
# ct = summ_all[(summ_all['state_abbr'] == "KS")].reset_index().dropna()
# ct["Cases"] = np.nan
# for ori in ct['ori']:
# entrypoint = "https://api.usa.gov/crime/fbi/sapi/api/data/arrest/agencies/offense/" + ori + "/all/2019/2019"
# query = {'api_key': 'e8vEnIM7V1Msff37SGU86c4r27dVzZOUow7LFCiM'}
# data2 = requests.get(entrypoint, params=query).json()
# for h in data2:
# if type(data2[h]) == list and data2[h] != []:
# data2[h][0].pop("data_year")
# data2[h][0].pop("csv_header")
# values = data2[h][0].values()
# ct["Cases"][ct['ori'] == ori] = sum(values)
    ct = pd.read_csv("ct.csv")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NAME:
debug_inp.py
DESCRIPTION:
debugs and fixes with user input .inp format files of CIT (sam file) type data.
SYNTAX:
~$ python debug_inp.py $INP_FILE
FLAGS:
-h, --help:
prints this help message
-dx, --dropbox:
Prioritize user's Dropbox folder when searching/debugging sam file paths;
the module will attempt to locate the Dropbox folder automatically.
Options to explicitly set value of inp fields:
--sam_path:
Path to .sam file
--magic_codes:
Magic method codes
--loc:
Site description given in .sam file; commonly location
--nc:
Naming convention; see docstring for debug_inp function.
Set to -1 if you are sure you want to change the current value
but want this module to try to figure out the correct value for you.
** WARNING **
This is not a robust functionality yet; you are safer explicitly
specifying the value.
--term:
Number of terminal characters in sample names (used to define specimens).
Default is 1
--no_ave:
Import all measurements (do not average repeat measurements)
--peak_AF:
Peak AF field used in ARM experiments
"""
import sys
import os
import argparse
import textwrap
import pandas as pd
import pmagpy.controlled_vocabularies3 as cv
from functools import reduce
from time import time, asctime
from funcs import shortpath
import pdb
# global top_dir, pkg_dir, data_dir, data_src, inp_dir, usr_configs_read
try: # get path names if set
from dmgui_au import pkg_dir, data_dir, data_src, inp_dir
usr_configs_read = True
except:
# if setup.py is running, don't issue warning
if sys.argv[0] != 'setup.py':
print("-W- Local path names have not been set. Please run setup.py")
usr_configs_read = False
nc_info_str ="""
Sample naming convention could not be determined. Choose from the list below:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
Enter number here: """
class Logger(object):
"""
log stdout to debug_inp.log
"""
def __init__(self):
self.terminal = sys.stdout
self.log = open("debug_inp.log", "a+")
self.log.write('\n{:-^80}\n\n'.format(' Starting session at {} '.format(asctime())))
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
def start_logger():
sys.stdout = Logger()
def stop_logger():
sys.stdout.log.write('{:-^80}\n'.format(' Closing session '))
sys.stdout.log.close()
sys.stdout = sys.__stdout__
def debug_inp(inp_file, dropbox = False, noinput=False, usr_configs_read=None,
data_src=None, inp_dir=None, **kwargs):
"""Fixes .inp files
Parameters
----------
inp_file : filename
Name of .inp file; can be relative or absolute path.
data_src : path
Top-level directory to search for data (for debugging sam paths).
Defaults to the value provided in dmgui_au.conf, if applicable.
dropbox : bool, default is False
When searching for the correct paths to data files,
prioritize user Dropbox folder. If you have already
specified your data directory in the global configuration
(with setup.py) this does nothing. Defaults to False.
noinput : bool, default is False
bypass all user input; may result in unresolved issues
**kwargs : optional
Manually overwrite certain fields of the .inp file.
Possible fields are abbreviations of the actual header name,
as shown in the table below.
For calculated fields like `nc` and `term`, setting the
keyword argument to -1 will force these to be recalculated
by the module. This functionality is still in development,
so you may prefer to explicitly pass the correct values instead.
-------------------------------------------------------
kwargs --------------> inp fields
-------------------------------------------------------
sam_path sam_path
magic_codes field_magic_codes
loc location
nc naming_convention
term num_terminal_char
no_ave dont_average_replicate_measurements
peak_AF peak_AF
time time_stamp
Returns
-------
New .inp file
"""
inp_directory,inp_file_name = os.path.split(inp_file)
if inp_directory=='': inp_directory = '.'
inp_file = os.path.abspath(inp_file)
print("-I- Running on %s and changing CWD to '%s'" %
(inp_file_name, shortpath(inp_directory)))
os.chdir(inp_directory)
# first deal with any user-specified overrides
kwarg_map = {
'sam_path':'sam_path',
'magic_codes':'field_magic_codes',
'loc':'location',
'nc':'naming_convention',
'term':'num_terminal_char',
'no_ave':'dont_average_replicate_measurements',
'peak_AF':'peak_AF',
'time':'time_stamp'
}
force_rewrite_dict = dict.fromkeys(kwarg_map.values())
for key,value in kwargs.items():
if key in kwarg_map.keys():
force_rewrite_dict[kwarg_map[key]] = value
if any(force_rewrite_dict.values()):
        df = pd.read_csv(inp_file, sep='\t', header=1, dtype=str)
"""
Genetic algorithm tools
Uses the same conventions as DEAP:
fitness values are stored in
p.fitness.values
p is a list
"""
import os
import numpy as np
import random
from copy import deepcopy
from collections import Sequence
from itertools import repeat
import hashlib
import math
import glob
import re
import pandas as pd
class Fitn():
pass
class Indiv(list):
def __init__(self, *args):
list.__init__(self, *args)
self.fitness=Fitn()
self.fitness.values=[]
# --------------------------------------------------------------------------------}
# --- Gene MAP
# --------------------------------------------------------------------------------{
class GeneMap():
def __init__(self,name,nBases,protein_ranges,kind=None,protein_neutr=None,meta=None,resolution=1000):
"""
A gene is between 0 and 1
A protein is defined by the ranges
"""
self.nBases = nBases
self.kind = kind
self.protein_ranges = protein_ranges
self.protein_neutr = protein_neutr
if protein_neutr is None:
self.protein_neutr=[(m+M)/2 for m,M in protein_ranges]
self.meta = meta
self.resolution = resolution
def pretty_name(n):
if n.find('|')>0:
s=n.split('|')
return s[-1]
elif n.find('\\')>0:
n=n.replace('.dat','')
s=n.split('\\')
return s[-1]
elif n.find('/')>0:
n=n.replace('.dat','')
s=n.split('/')
return s[-1]
else:
return n
self.name = name
self.pretty_name = pretty_name(name)
def __repr__(self):
s=''.join(['x']*self.nBases)+': '+self.name+'\n'
return s
def decode(self,gene,iBase=None):
if iBase is None:
prot =[]
for g,pr in zip(gene,self.protein_ranges):
p=pr[0]+ g*(pr[1]-pr[0])
prot.append(p)
if g<0 or g>1:
print('g:',g, 'pr:',pr, ' -> p:',p)
raise Exception('The gene cannot be decoded properly')
else:
g=gene
pr=self.protein_ranges[iBase]
prot=pr[0]+ g*(pr[1]-pr[0])
if g<0 or g>1:
print('g:',g, 'pr:',pr, ' -> p:',prot)
raise Exception('The base cannot be decoded properly')
return prot
def encode(self,protein):
gene=[]
for p,pr in zip(protein,self.protein_ranges):
g=(p-pr[0])/(pr[1]-pr[0])
gene.append(g)
if g>1 or g<0:
print('p:',p, 'pr:',pr, ' -> g:',g)
raise Exception('The protein cannot be encoded properly')
return gene
def neutralProtein(self):
return self.protein_neutr
def show_full_raw(self,gene):
s='['+' '.join([str(b) for b in gene])+']: '+self.name
return s
def show_full(self,gene):
def pretty(b,pr):
if self.resolution is None:
return str(b)
delta=pr[1]-pr[0]
if delta<=0:
return str(b)
nDec=int(np.log10(delta/self.resolution))
nInt=int(np.log10(pr[1]))
if nInt<0:
nInt=-1
if nDec<0:
fmt='{:'+str(nInt-nDec+3)+'.'+str(-nDec+1)+'f}'
#print(fmt)
return fmt.format(b)
elif nInt>0:
fmt='{:'+str(nInt+1)+'.0f}'
#print(fmt)
return fmt.format(b)
else:
return str(b)
if self.nBases>1:
s=self.pretty_name+': ['+' '.join([pretty(b,rg) for b,rg in zip(self.decode(gene),self.protein_ranges)])+']'
else:
s=self.pretty_name+': '+pretty(self.decode(gene)[0],self.protein_ranges[0])
return s
def geneBounds(self):
return [(0,1)]*self.nBases
def proteinBounds(self):
return [(m,M) for m,M in self.protein_ranges]
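# Small illustration (added; not part of the original library): one gene with two
# bases mapped onto physical parameter ranges.
#   gm = GeneMap('chord|twist', 2, protein_ranges=[(0.5, 2.0), (-5.0, 5.0)])
#   gm.decode([0.0, 0.5])   # -> [0.5, 0.0]
#   gm.encode([2.0, 5.0])   # -> [1.0, 1.0]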
class ChromosomeMap(list):
def add(self,d):
self.append(d)
def append(self,d):
super(ChromosomeMap,self).append(d)
# TODO check that
if not isinstance(d,GeneMap):
raise Exception('Can only add `GenMap` types')
@property
def nBases(self):
return sum([gene.nBases for gene in self])
@property
def nGenes(self):
return len(self)
def maxNBasesPerGene(self):
return max([gene.nBases for gene in self])
def neutralChromosome(self):
v=[]
for gm in self:
v+=gm.encode(gm.protein_neutr)
return v
def neutralProtein(self):
v=[]
for gm in self:
v+=gm.protein_neutr
return v
def decode(self,chromosome,iBase=None):
if iBase is None:
v=[]
for gm,gene in zip(self,self.split(chromosome)):
v+=gm.decode(gene)
else:
if iBase>=self.nBases:
raise Exception('iBase should be between 0 and nBases')
i=0
for ig,gm in enumerate(self):
if (iBase>=i) and (iBase<i+gm.nBases):
break
else:
i+=gm.nBases
iBase=iBase-i
#print('New iBase: {} for gene: {} {}'.format(iBase,ig,gm))
v=gm.decode(chromosome,iBase)
return v
def encode(self,protein_chain):
v=[]
for gm,prot in zip(self,self.split(protein_chain)):
v+=gm.encode(prot)
return v
def chromosomeBounds(self):
v=[]
for gm in self:
v+=gm.geneBounds()
return v
def proteinChainBounds(self):
v=[]
for gm in self:
v+=gm.proteinBounds()
return v
def __repr__(self):
s=''
fmt='{:'+str(self.maxNBasesPerGene())+'s}'
for gm in self:
s+=fmt.format(''.join(['x']*gm.nBases))+': '+gm.name+'\n'
return s
def show_full(self,chromosome,sep='\n'):
s=''
for gm,gene in zip(self,self.split(chromosome)):
s+=gm.show_full(gene)+sep
return s
def split(self,chromosome):
genes=[]
n=0
for gm in self:
genes.append(chromosome[n:n+gm.nBases])
n=n+gm.nBases
return genes
# --------------------------------------------------------------------------------}
# --- ID
# --------------------------------------------------------------------------------{
def nparray_hash(x,length=16):
return hashlib.md5((x.tobytes())).hexdigest()[:length]
def chromID(p):
return nparray_hash(np.array(p),length=32)
# --------------------------------------------------------------------------------}
# --- Parametric
# --------------------------------------------------------------------------------{
def parameticGA(fitnessEvalFun,ch_map,nPerBase,nFitness,resolution=None):
"""
    Perform a parametric study using the same formalism as the genetic algorithm
    Each base varies between 0 and 1, sampled at the number of points given by `nPerBase` (a list with one value per base, or a single value)
The function `fitnessEvalFun` is evaluated on the population
`resolution` should be a power of 10, like 10, 100, 1000
"""
nBases=ch_map.nBases
if isinstance(nPerBase,list):
if len(nPerBase)!=nBases:
raise Exception('If nPerBase is a list it must be the same length as the number of bases')
else:
nPerBase= [nPerBase]*nBases
nTot = np.prod(nPerBase)
nValuesCum = np.insert(np.cumprod(nPerBase),0,1)[:-1];
vBaseValues=[np.linspace(0,1,n) for n in nPerBase]
print('Parametric values (no rounding:)')
for v in vBaseValues:
print(v)
vProtValues=[np.array([ch_map.decode(g,iBase=j) for g in v]) for j,v in enumerate(vBaseValues)]
print('Prot values (no rounding:)')
for v in vProtValues:
print(v)
if resolution:
vBaseValues=[np.round(resolution*np.linspace(0,1,n))/resolution for n in nPerBase]
print('Parametric values (with rounding:)')
for v in vBaseValues:
print(v)
# we scale
print('Prot values (with rounding:)')
vProtValues=[np.array([ch_map.decode(g,iBase=j) for g in v]) for j,v in enumerate(vBaseValues)]
for v in vProtValues:
print(v)
fits_arr = np.zeros( tuple(nPerBase+[nFitness] ) )
fits_norm = np.zeros( tuple(nPerBase) )
ValFit = np.zeros( tuple(nPerBase) )
print('Creating population of {} individuals...'.format(nTot))
pop=[]
for i in range(nTot):
Indexes=(np.mod(np.floor(i/nValuesCum),nPerBase)).astype(int);
chromosome=Indiv([vBaseValues[j][Indexes[j]] for j in range(nBases) ])
#print(i,Indexes,chromosome)
pop.append(chromosome)
print('Evaluating population...')
for i,p in enumerate(pop):
Indexes=tuple((np.mod(np.floor(i/nValuesCum),nPerBase)).astype(int));
fits = fitnessEvalFun(p,stat='{:4.1f}% - '.format(100.0*i/nTot))
fits_norm[Indexes] = np.linalg.norm(fits)
fits_arr [Indexes] = fits
return fits_norm,fits_arr,pop,vBaseValues,vProtValues
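# Usage sketch (illustrative assumption, not part of the original module): sweep a
# two-base chromosome on a 3x3 grid with a dummy two-objective fitness function.
#   ch_map = ChromosomeMap()
#   ch_map.append(GeneMap('x|y', 2, protein_ranges=[(0.0, 1.0), (0.0, 1.0)]))
#   def dummy_fitness(chromosome, stat=''):
#       x, y = ch_map.decode(chromosome)
#       return [x + y, abs(x - y)]
#   fits_norm, fits_arr, pop, vb, vp = parameticGA(dummy_fitness, ch_map,
#                                                  nPerBase=3, nFitness=2)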
# --------------------------------------------------------------------------------}
# --- Population/individual manipulations
# --------------------------------------------------------------------------------{
def addIfAbsent(pop,ind,sPop='',sInd=''):
if ind not in pop:
pop.append(clone(ind))
if len(sPop)>0:
print('Adding {:8s} to {}'.format(chromID(ind),sPop))
def clone(x):
return deepcopy(x)
def populationTrimAccuracy(pop,nDecimals=None):
for i in range(len(pop)):
for j in range(len(pop[0])):
pop[i][j]=np.around(pop[i][j],decimals=nDecimals)
return pop
def splitstrn(s,n):
return [s[i:i+n] for i in range(0, len(s), n)]
def populationStats(pop,best=None,stats=None):
nVar = len(pop[0].fitness.values)
new_stats=[]
for i in range(nVar):
fits = [p.fitness.values[i] for p in pop]
d=dict()
d['Mean'] = np.mean(fits)
d['Min'] = np.min(fits)
d['Max'] = np.max(fits)
d['Std'] = np.std(fits)
if best is not None:
d['Best'] = best.fitness.values[i]
else:
d['Best'] = np.nan
new_stats.append(pd.DataFrame(d,index=[0]))
if stats is not None:
stats=[ s.append(ns, ignore_index=True) for s,ns in zip(stats,new_stats)]
return new_stats,stats
def populationPrint(pop,nBasePerGene=None,label=''):
print('------------------ {} POPULATION ----------------------'.format(label))
if nBasePerGene is None:
nBasePerGene=len(pop[0])
for p,i in zip(pop,range(len(pop))):
nCharPerBase = 5 # NOTE related to format below..
sBases=' '.join(['{:.2f}'.format(x) for x in p])
splits=splitstrn(sBases,nBasePerGene*nCharPerBase)
sGenes='| '.join(splits)
sFits=' '.join(['{:.3f}'.format(x) for x in p.fitness.values])
print('#{:2d} | {} | {}'.format(i,sFits,sGenes))
def populationSave(pop,directory='',basename='GA_DB_',newfile=False,fileformat='csv',fitsnames=None,basesnames=None,fitsformats=None,basesformats=None):
# Detecting existing files
files=glob.glob(os.path.join(directory,basename+'[0-9]*'+'.'+fileformat))
if newfile:
if len(files)==0:
i=0
else:
i=int(re.search(r'\d+',files[-1]).group())+1
filename=os.path.join(directory,'{}{:04d}.{}'.format(basename,i,fileformat))
readflag='w'
else:
filename=files[-1]
readflag='a'
directory=os.path.dirname(filename)
if len(directory)>0:
if not os.path.exists(directory):
os.mkdir(directory)
# Creating a matrix with the population data
nRow = len(pop)
nBases = len(pop[0])
nFit = len(pop[0].fitness.values)
if fitsformats is None:
fitsformats='{:.5f}'
if not isinstance(fitsformats, list):
fitsformats=[fitsformats]*nFit
if basesformats is None:
basesformats='{:.5f}'
if not isinstance(basesformats, list):
basesformats=[basesformats]*nBases
# writing to disk
with open(filename,readflag) as f:
if fileformat.lower()=='csv':
delim=' '
if newfile:
# Header
if fitsnames is None:
fitsnames=['Fit{:d}'.format(i) for i in range(nFit)]
if basesnames is None:
basesnames=['Base{:d}'.format(i) for i in range(nBases)]
s = 'ID'+delim+delim.join(fitsnames)+delim+delim.join(basesnames)
f.write(s)
f.write('\n')
for i,p in enumerate(pop):
sFits = delim.join([s.format(v) for s,v in zip(fitsformats,p.fitness.values)])
sBases = delim.join([s.format(v) for s,v in zip(basesformats,p)])
f.write(chromID(p)+delim+sFits+delim+sBases+'\n')
else:
raise Exception('Unknown fileformat {}'.format(fileformat))
return filename
def populationLoad(filename=None, nFits=2):
fileformat=os.path.splitext(filename)[1][1:].lower()
if fileformat == 'csv':
        df = pd.read_csv(filename, sep=' ')
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import plotly as pl
import re
import requests
from .DataFrameUtil import DataFrameUtil as dfUtil
class CreateDataFrame():
"""Classe de serviços para a criação de dataframes utilizados para a construção dos gráficos"""
def __init__(self):
self.dfTimeSeriesCases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
self.dfTimeSeriesRecover = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
self.dfTimeSeriesDeath = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
url = 'https://covid19.who.int/WHO-COVID-19-global-table-data.csv'
self.dfRegioes = pd.read_csv(url)
def DataFrameMensal():
        pd.options.display.float_format = '{:.0f}'.format  # no decimal places
        # Building the dataframes
        # Collecting data from CSV files made available online.
url = 'https://covid19.who.int/WHO-COVID-19-global-table-data.csv'
dfRegioes = pd.read_csv(url)
dfTimeSeriesCases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
dfTimeSeriesRecover = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
dfTimeSeriesDeath = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
        # Collecting data via web scraping
html_source = requests.get("https://www.worldometers.info/coronavirus/").text
html_source = re.sub(r'<.*?>', lambda g: g.group(0).upper(), html_source)
table_MN2 = pd.read_html(html_source)
dfWorldMeters = table_MN2[0]
dfWorldMeters.columns = [column.replace(" ", "_").replace(",", "_").replace("-","").replace("__","_") for column in dfWorldMeters.columns]
        # Renaming columns for consistency
dfTimeSeriesCases.rename(columns={'Country/Region':'Name'}, inplace=True)
dfTimeSeriesRecover.rename(columns={'Country/Region':'Name'}, inplace=True)
dfTimeSeriesDeath.rename(columns={'Country/Region':'Name'}, inplace=True)
        # Normalizing country names
dfTimeSeriesCases.loc[249,'Name'] = "United States of America"
dfTimeSeriesRecover.loc[249,'Name'] = "United States of America"
dfTimeSeriesDeath.loc[249,'Name'] = "United States of America"
dfWorldMeters.loc[8, 'Country_Other']= "United States of America"
dfWorldMeters.loc[13, 'Country_Other']= "United Kingdom"
dfRegioes.loc[6, 'Name'] ="United Kingdom"
        # Filtering dataframes
dfRegioes.columns =[column.replace(" ", "_").replace("-","") for column in dfRegioes.columns]
dfRegioes.query('Name != "Global" and Name != "World" and Cases__cumulative_total > 0 and WHO_Region != "NaN"', inplace=True)
dfWorldMeters.query('Country_Other != "Total: " and Country_Other != "World" and ' +
' Country_Other != "North America" and Country_Other != "South America" and Country_Other != "Asia" and Country_Other != "Europe" ' +
'and Country_Other != "Africa" and Country_Other != "Oceania" and Country_Other != "Total:" and Country_Other != "NaN" and Population != "nan" and Population != "NaN"', inplace=True)
        # Sorting dataframes
dfRegioes.sort_values(['Name'], inplace=True)
dfWorldMeters.sort_values(['Country_Other'], inplace=True)
        # Creating new derived dataframes
selected_columns = dfRegioes[["Name", "WHO_Region"]]
dfRegioesNew = selected_columns.copy()
dfRegioesNew.sort_values(['Name'], inplace=True)
listMonth = ['Jan', 'Fev', 'Mar', 'Abr','Mai','Jun',
'Jul', 'Ago','Set','Out','Nov', 'Dez',
'Jan 21', 'Fev 21', 'Mar 21', 'Abr 21']
dfTimeSeriesCases.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
dfTimeSeriesRecover.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
dfTimeSeriesDeath.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
selected_columns = dfTimeSeriesCases[dfUtil.SelectColumnsMensal()]
dfTimeSeriesCases = selected_columns.copy()
selected_columns = dfTimeSeriesRecover[dfUtil.SelectColumnsMensal()]
dfTimeSeriesRecover = selected_columns.copy()
selected_columns = dfTimeSeriesDeath[dfUtil.SelectColumnsMensal()]
dfTimeSeriesDeath = selected_columns.copy()
selected_columns = dfWorldMeters[["Country_Other", "Population"]]
dfWorldMetersNew = selected_columns.copy()
dfWorldMetersNew.sort_values(['Country_Other'], inplace=True)
dfTimeSeriesCases = dfUtil.RenameColsMesAno(dfTimeSeriesCases)
dfTimeSeriesRecover = dfUtil.RenameColsMesAno(dfTimeSeriesRecover)
dfTimeSeriesDeath = dfUtil.RenameColsMesAno(dfTimeSeriesDeath)
        # Renaming columns for the final dataframe merge
dfRegioesNew.rename(columns={'WHO_Region':'Regiao'}, inplace=True)
dfWorldMetersNew.rename(columns={'Country_Other': 'Name'}, inplace=True)
dfWorldMetersNew.rename(columns={'Population': 'Populacao'}, inplace=True)
dfAux = dfTimeSeriesCases
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesCases)
dfTimeSeriesCases = dfAux.rename(columns=mapping)
dfAux = dfTimeSeriesRecover
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesRecover)
dfTimeSeriesRecover = dfAux.rename(columns=mapping)
dfAux = dfTimeSeriesDeath
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesDeath)
dfTimeSeriesDeath = dfAux.rename(columns=mapping)
        #Summing results across the dataframe rows
dfTimeSeriesCasesSomado = dfUtil.SumRows(dfTimeSeriesCases)
dfTimeSeriesRecoverSomado = dfUtil.SumRows(dfTimeSeriesRecover)
dfTimeSeriesDeathSomado = dfUtil.SumRows(dfTimeSeriesDeath)
        # Resetting dataframe indexes
dfRegioesNew.reset_index(drop=True)
dfWorldMetersNew.reset_index(drop=True)
dfTimeSeriesCasesSomado.reset_index(drop=True)
dfTimeSeriesRecoverSomado.reset_index(drop=True)
dfTimeSeriesDeathSomado.reset_index(drop=True)
dfTimeSeriesCasesSomado.sort_values(['Name'], inplace=True)
dfTimeSeriesRecoverSomado.sort_values(['Name'], inplace=True)
dfTimeSeriesDeathSomado.sort_values(['Name'], inplace=True)
dfRegioesNew.sort_values(['Name'], inplace=True)
dfWorldMetersNew.sort_values(['Name'], inplace=True)
# Merge dataframe
dfFinalCases = pd.merge(dfTimeSeriesCasesSomado, dfRegioesNew, on="Name")
dfFinalCases.rename(columns={'WHO_Region': 'Regiao'}, inplace=True)
dfFinalRecover = pd.merge(dfTimeSeriesRecoverSomado, dfRegioesNew, on="Name")
dfFinalRecover.rename(columns={'WHO_Region': 'Regiao'}, inplace=True)
dfFinalDeath = pd.merge(dfTimeSeriesDeathSomado, dfRegioesNew, on="Name")
dfFinalDeath.rename(columns={'WHO_Region': 'Regiao'}, inplace=True)
        #BUILDING NEW DATAFRAME 2
d = {'Name': [] ,'Mes': [] ,'Recuperado': []}
DataFrameRecover = pd.DataFrame(data=d)
# print(dfFinalRecover.to_string())
# # for index, row in dfFinalRecover.query('Name == "United States of America"').iterrows(): #PARA CADA PAÍS
for index, row in dfFinalRecover.iterrows(): #PARA CADA PAÍS
for mes in listMonth: #PERCORRER POR MÊS
DataFrameRecover = DataFrameRecover.append({'Name': dfFinalRecover.loc[index,'Name']
,'Mes': mes
#,'Casos':dfFinalCases.loc[index,mes]
,'Recuperado': dfFinalRecover.loc[index,mes]
#,'Mortos' : dfFinalDeath.loc[index,mes]
}, ignore_index = True)
d = {'Name': []
,'Mes': []
,'Casos': []
}
DataFrameCasos = pd.DataFrame(data=d)
for index, row in dfFinalCases.iterrows(): #PARA CADA PAÍS
for mes in listMonth: #PERCORRER POR MÊS
DataFrameCasos = DataFrameCasos.append({'Name': dfFinalCases.loc[index,'Name']
,'Mes': mes
,'Casos':dfFinalCases.loc[index,mes]
}, ignore_index = True)
d = {'Name': []
,'Mes': []
,'Mortos': []
}
DataFrameDeath = pd.DataFrame(data=d)
for index, row in dfFinalDeath.iterrows(): #PARA CADA PAÍS
for mes in listMonth: #PERCORRER POR MÊS
DataFrameDeath = DataFrameDeath.append({'Name': dfFinalDeath.loc[index,'Name']
,'Mes': mes
,'Mortos':dfFinalDeath.loc[index,mes]
}, ignore_index = True)
dtTimeline = pd.merge(DataFrameCasos, DataFrameDeath, on=['Name','Mes'])
DataFrameTimeline = pd.merge(dtTimeline, DataFrameRecover, on=['Name','Mes'])
dfPre = pd.merge(DataFrameTimeline, dfRegioesNew, on="Name")
dfFinal = | pd.merge(dfPre, dfWorldMetersNew, on="Name") | pandas.merge |
import argparse
import glob
import math
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numba import jit, prange
from sklearn import metrics
from utils import *
@jit(nopython=True, nogil=True, cache=True, parallel=True, fastmath=True)
def compute_tp_tn_fp_fn(y_true, y_pred):
tp = 0
tn = 0
fp = 0
fn = 0
for i in prange(y_pred.size):
tp += y_true[i] * y_pred[i]
tn += (1-y_true[i]) * (1-y_pred[i])
fp += (1-y_true[i]) * y_pred[i]
fn += y_true[i] * (1-y_pred[i])
return tp, tn, fp, fn
def compute_precision(tp, fp):
return tp / (tp + fp)
def compute_recall(tp, fn):
return tp / (tp + fn)
def compute_f1_score(precision, recall):
try:
return (2*precision*recall) / (precision + recall)
except:
return 0
def compute_fbeta_score(precision, recall, beta):
try:
return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
except:
return 0
def compute_accuracy(tp,tn,fp,fn):
return (tp + tn)/(tp + tn + fp + fn)
def compute_auc(GT, pred):
return metrics.roc_auc_score(GT, pred)
def compute_auprc(GT, pred):
prec, rec, thresholds = metrics.precision_recall_curve(GT, pred)
# print(prec, rec, thresholds)
plt.plot(prec, rec)
plt.show()
# return metrics.auc(prec, rec)
def compute_average_precision(GT, pred):
ratio = sum(GT)/np.size(GT)
return metrics.average_precision_score(GT, pred), ratio
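# Illustrative usage of the metric helpers above (a minimal sketch; the arrays below are
# made-up placeholders, not data from this project):
# y_true = np.array([1, 0, 1, 1, 0], dtype=np.uint8)
# y_pred = np.array([1, 0, 0, 1, 1], dtype=np.uint8)
# tp, tn, fp, fn = compute_tp_tn_fp_fn(y_true, y_pred)
# precision = compute_precision(tp, fp)
# recall = compute_recall(tp, fn)
# f1 = compute_f1_score(precision, recall)
# acc = compute_accuracy(tp, tn, fp, fn)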
def main(args):
#====== Numba compilation ======
# The 2 lines are important
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.uint8), np.array([0,1,0], dtype=np.uint8))
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.float32), np.array([0,1,0], dtype=np.float32))
#===============================
out = args.out
if not os.path.exists(os.path.dirname(out)):
os.makedirs(os.path.dirname(out))
model_name = args.model_name
number_epochs = args.epochs
batch_size = args.batch_size
NumberFilters = args.number_filters
lr = args.learning_rate
cv_fold = args.cv_fold
model_params = ['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', 'Empty col', 'Empty col2', 'Empty col3', 'CV']
param_values = [number_epochs, batch_size, NumberFilters, lr, '', '', '', '']
Params = pd.Series(param_values, index=model_params, name='Params values')
metrics_names = ['AUPRC','AUPRC - Baseline','F1_Score','Fbeta_Score','Accuracy','Recall','Precision','CV fold']
Metrics = pd.Series(metrics_names, index=model_params, name='Model\Metrics')
if not os.path.exists(out):
Folder_Metrics = pd.DataFrame(columns = model_params)
Image_Metrics = pd.DataFrame(columns = model_params)
else:
Metrics_file = pd.ExcelFile(out)
Folder_Metrics = pd.read_excel(Metrics_file, 'Sheet1', index_col=0, header=None)
Folder_Metrics = Folder_Metrics[Folder_Metrics.columns[:8]]
Folder_Metrics.columns = model_params
Image_Metrics = pd.read_excel(Metrics_file, 'Sheet2', index_col=0, header=None)
Image_Metrics.columns = model_params
matching_values = (Folder_Metrics.values[:,:4] == Params.values[:4]).all(1)
if not matching_values.any():
Folder_Metrics = Folder_Metrics.append(pd.Series(['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', '', '', '', 'CV'], name='Params', index=model_params), ignore_index=False)
Folder_Metrics = Folder_Metrics.append(Params, ignore_index=False)
Folder_Metrics = Folder_Metrics.append(Metrics, ignore_index=False)
Folder_Metrics = Folder_Metrics.append(pd.Series(name='', dtype='object'), ignore_index=False)
matching_values = (Image_Metrics.values[:,:4] == Params.values[:4]).all(1)
if not matching_values.any():
Image_Metrics = Image_Metrics.append(pd.Series(['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', '', '', '', 'File Name'], name='Params', index=model_params), ignore_index=False)
Image_Metrics = Image_Metrics.append(pd.Series(param_values, index=model_params, name='Params values'), ignore_index=False)
Image_Metrics = Image_Metrics.append(pd.Series(['AUPRC','AUPRC - Baseline','F1_Score','Fbeta_Score','Accuracy','Recall','Precision','File Name'], index=model_params, name='Model\Metrics'), ignore_index=False)
Image_Metrics = Image_Metrics.append(pd.Series(name='', dtype='object'), ignore_index=False)
arrays = [range(len(Folder_Metrics)), Folder_Metrics.index]
Index = | pd.MultiIndex.from_arrays(arrays, names=('number', 'name')) | pandas.MultiIndex.from_arrays |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PAQUETES PARA CORRER OP.
import netCDF4
import pandas as pd
import numpy as np
import datetime as dt
import json
import wmf.wmf as wmf
import hydroeval
import glob
import MySQLdb
#modulo pa correr modelo
import hidrologia
from sklearn.linear_model import LinearRegression
import math
import os
#spatial
import cartopy.crs as crs
import geopandas as gpd
import pyproj
from pyproj import transform
from cartopy.feature import ShapelyFeature
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context('notebook', font_scale=1.13)
#FORMATO
# fuente
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#avoid warnings
import warnings
warnings.filterwarnings('ignore')
#---------------
#Base functions.
#---------------
def get_rutesList(rutas):
    ''' Opens the text file at the path 'rutas' and returns a list with the lines of that file.
        Base function.
        #Arguments
        rutas: string, path to the file.
    '''
f = open(rutas,'r')
L = f.readlines()
f.close()
return L
def set_modelsettings(ConfigList):
ruta_modelset = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_modelset')
# model settings Json
with open(ruta_modelset, 'r') as f:
model_set = json.load(f)
# Model set
wmf.models.max_aquifer = wmf.models.max_gravita * 10
wmf.models.retorno = model_set['retorno']
wmf.models.show_storage = model_set['show_storage']
wmf.models.separate_fluxes = model_set['separate_fluxes']
wmf.models.dt = model_set['dt']
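# Illustrative model-settings JSON consumed by set_modelsettings (keys taken from the
# assignments above; the values shown are placeholders, not project defaults):
# {
#     "retorno": 0,
#     "show_storage": 1,
#     "separate_fluxes": 0,
#     "dt": 300
# }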
def round_time(date = dt.datetime.now(),round_mins=5):
'''
    Rounds a datetime object to the nearest 'round_mins' minutes.
    If the remainder is <= 'round_mins'/2 it takes the minute behind, else the minute ahead.
Parameters
----------
date : date to round
round_mins : round to this nearest minutes interval
Returns
----------
datetime object rounded, datetime object
'''
dif = date.minute % round_mins
if dif <= round_mins/2:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins))
else:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins)) + dt.timedelta(minutes=round_mins)
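# Example (illustrative): round_time(dt.datetime(2021, 3, 1, 10, 7), round_mins=5)
# returns 2021-03-01 10:05, while minute 8 would round up to 10:10.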
def get_credentials(ruta_credenciales):
credentials = json.load(open(ruta_credenciales))
#creds para consultas
mysqlServer = credentials['MySql_Siata']
for key in np.sort(list(credentials['MySql_Siata'].keys()))[::-1]: #1:hal, 2:sal
try:
connection = MySQLdb.connect(host=mysqlServer[key]['host'],
user=mysqlServer[key]['user'],
password=mysqlServer[key]['password'],
db=mysqlServer[key]['db'])
print('SERVER_CON: Succesful connection to %s'%(key))
host=mysqlServer[key]['host']
user=mysqlServer[key]['user']
password=mysqlServer[key]['password']
db=mysqlServer[key]['db']
break #si conecta bien a SAL para.
except:
print('SERVER_CON: No connection to %s'%(key))
pass
#creds para copiar a var
user2copy2var = credentials['cred_2copy2var']['user']; host2copy2var = credentials['cred_2copy2var']['host']
return host,user,password,db,user2copy2var,host2copy2var
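# Illustrative layout of the credentials JSON read by get_credentials (keys inferred from
# the lookups above; server names and all values are placeholders):
# {
#     "MySql_Siata": {"<server_name>": {"host": "...", "user": "...", "password": "...", "db": "..."}},
#     "cred_2copy2var": {"user": "...", "host": "..."}
# }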
def coord2hillID(ruta_nc, df_coordxy):
#lee simubasin pa asociar tramos, saca topologia basica
cu = wmf.SimuBasin(rute= ruta_nc)
cu.GetGeo_Cell_Basics()
cu.GetGeo_Parameters()
#saca coordenadas de todo el simubasin y las distancias entre ellas
coordsX = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[0]
coordsY = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[1]
disty = np.unique(np.diff(np.unique(np.sort(coordsY))))
distx = np.unique(np.diff(np.unique(np.sort(coordsX))))
df_ids = pd.DataFrame(index = df_coordxy.index,columns=['id'])
#identifica el id de la ladera donde caen los ptos
for index in df_coordxy.index:
df_ids.loc[index]=cu.hills_own[np.where((coordsY+disty[0]/2>df_coordxy.loc[index].values[1]) & (coordsY-disty[0]/2<df_coordxy.loc[index].values[1]) & (coordsX+distx[0]/2>df_coordxy.loc[index].values[0]) & (coordsX-distx[0]/2<df_coordxy.loc[index].values[0]))[0]].data
return df_ids
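# Illustrative usage (hypothetical point and path): df_coordxy holds one (x, y) pair per
# row, and the returned frame gives the hillslope id of the cell each point falls in.
# points = pd.DataFrame([[-75.60, 6.25]], index=['p1'], columns=['x', 'y'])
# ids = coord2hillID('basin.nc', points)   # 'basin.nc' is a placeholder simubasin path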
#-----------------------------------
#-----------------------------------
#Configfile reading functions
#-----------------------------------
#-----------------------------------
def get_ruta(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns the path it holds.
        Base function.
        #Arguments
        RutesList: list returned by get_rutesList() in this script.
        key: string, key used to find the line in the list that starts with it.
    '''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i.split(' ')[-1][:-1]
else:
return 'Aviso: no existe linea con el key especificado'
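# Example (illustrative) of the configfile line parsed by get_ruta:
#   - **ruta_proj** = /some/project/path/
# get_ruta(RutesList, 'ruta_proj') returns the last token of that line with its final
# character (normally the trailing newline) stripped, i.e. '/some/project/path/'.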
def get_line(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns its fields.
        Base function.
        #Arguments
        RutesList: list returned by get_rutesList() in this script.
        key: string, key used to find the line in the list that starts with it.
    '''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i[:-1].split(' ')[2:]
else:
return 'Aviso: no existe linea con el key especificado'
def get_modelPlot(RutesList, PlotType = 'Qsim_map'):
    ''' #Returns the execution list defined for the given plot type in the Plot table of the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, result of reading the configfile with get_rutesList.
        - PlotType= string, type of plot. Default= 'Qsim_map'.
    '''
for l in RutesList:
key = l.split('|')[1].rstrip().lstrip()
if key[3:] == PlotType:
EjecsList = [i.rstrip().lstrip() for i in l.split('|')[2].split(',')]
return EjecsList
return key
def get_modelPars(RutesList):
    ''' #Returns a dictionary with the information of the parameter (Calib) table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, result of reading the configfile with get_rutesList.
    '''
DCalib = {}
for l in RutesList:
c = [float(i) for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c})
return DCalib
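# Illustrative table row consumed by get_modelPars (markdown-style; the key and values
# below are made up):
# | -par011 | p01 | 1.0 | 0.8 | 1.2 | 1.0 | 0.5 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 |
# would be parsed as {'p01': [1.0, 0.8, 1.2, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0]}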
def get_modelPaths(List):
    ''' #Returns a dictionary with the paths defined in the corresponding table of the configfile.
        #Operational function.
        #Arguments:
        - List= list, result of reading the configfile with get_rutesList.
    '''
DCalib = {}
for l in List:
c = [i for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c[0]})
return DCalib
def get_modelStore(RutesList):
    ''' #Returns a dictionary with the information of the Store table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, result of reading the configfile with get_rutesList.
    '''
DStore = {}
for l in RutesList:
l = l.split('|')
DStore.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'Actualizar': l[3].rstrip().lstrip(),
'Tiempo': float(l[4].rstrip().lstrip()),
'Condition': l[5].rstrip().lstrip(),
'Calib': l[6].rstrip().lstrip(),
'BackSto': l[7].rstrip().lstrip(),
'Slides': l[8].rstrip().lstrip()}})
return DStore
def get_modelStoreLastUpdate(RutesList):
    ''' #Returns a dictionary with the information of the Update table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, result of reading the configfile with get_rutesList.
    '''
DStoreUpdate = {}
for l in RutesList:
l = l.split('|')
DStoreUpdate.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'LastUpdate': l[3].rstrip().lstrip()}})
return DStoreUpdate
def get_ConfigLines(RutesList, key, keyTable = None, PlotType = None):
    ''' #Returns a dictionary (or list) with the information of the configfile tables: Pars, Paths, Store, Update, Plot.
        #Operational function.
        #Arguments:
        - RutesList= list, result of reading the configfile with get_rutesList.
        - key= string, keyword that the table rows start with, e.g. '-CIr' or '-par...'.
        - keyTable= string, table to parse: 'Pars', 'Paths', 'Store', 'Update' or 'Plot'.
        - PlotType= string, type of plot (only used with keyTable='Plot'). Default= None.
    '''
List = []
for i in RutesList:
if i.startswith('|'+key) or i.startswith('| '+key):
List.append(i)
if len(List)>0:
if keyTable == 'Pars':
return get_modelPars(List)
if keyTable == 'Paths':
return get_modelPaths(List)
if keyTable == 'Store':
return get_modelStore(List)
if keyTable == 'Update':
return get_modelStoreLastUpdate(List)
if keyTable == 'Plot':
return get_modelPlot(List, PlotType=PlotType)
return List
else:
return 'Aviso: no se encuentran lineas con el key de inicio especificado.'
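# Illustrative usage (mirrors the calls made later in get_executionlists_all4all):
# DicCI = get_ConfigLines(ConfigList, '-CIr', keyTable='Paths')
# DicPars = get_ConfigLines(ConfigList, '-par' + some_ci_name, keyTable='Pars')  # some_ci_name is hypothetical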
#-----------------------------------
#-----------------------------------
#Radar rainfall generation functions
#-----------------------------------
#-----------------------------------
def file_format(start,end):
'''
Returns the file format customized for siata for elements containing
starting and ending point
Parameters
----------
start : initial date
end : final date
Returns
----------
file format with datetimes like %Y%m%d%H%M
    Example
    ----------
    file_format('2021-03-01 10:00','2021-03-01 12:00') -> '202103011000-202103011200'
    '''
start,end = pd.to_datetime(start),pd.to_datetime(end)
format = '%Y%m%d%H%M'
return '%s-%s'%(start.strftime(format),end.strftime(format))
def hdr_to_series(path):
'''
Reads hdr rain files and converts it into pandas Series
Parameters
----------
path : path to .hdr file
Returns
----------
pandas time Series with mean radar rain
'''
s = pd.read_csv(path,skiprows=5,usecols=[2,3]).set_index(' Fecha ')[' Lluvia']
s.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],s.index)))
return s
def hdr_to_df(path):
'''
Reads hdr rain files and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr file
Returns
----------
pandas DataFrame with mean radar rain
'''
if path.endswith('.hdr') != True:
path = path+'.hdr'
df = pd.read_csv(path,skiprows=5).set_index(' Fecha ')
df.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],df.index)))
df = df.drop('IDfecha',axis=1)
df.columns = ['record','mean_rain']
return df
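# Illustrative usage (the paths are placeholders): both helpers read the same .hdr file,
# one returning only the mean-rain Series, the other the full record/mean_rain DataFrame.
# s = hdr_to_series('rain_op/202103010000-202103010600-proj.hdr')
# df = hdr_to_df('rain_op/202103010000-202103010600-proj')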
def bin_to_df(path,ncells,start=None,end=None,**kwargs):
'''
Reads rain fields (.bin) and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr and .bin file
start : initial date
end : final date
Returns
----------
pandas DataFrame with mean radar rain
Note
----------
path without extension, ejm folder_path/file not folder_path/file.bin,
if start and end is None, the program process all the data
'''
    start,end = pd.to_datetime(start),pd.to_datetime(end)
    # the record index comes from the companion .hdr file (same basename as the .bin)
    df = hdr_to_df(path)
    records = df['record'].values
rain_field = []
for count,record in enumerate(records):
if record != 1:
rain_field.append(wmf.models.read_int_basin('%s.bin'%path,record,ncells)[0]/1000.0)
count = count+1
# format = (count*100.0/len(records),count,len(records))
else:
rain_field.append(np.zeros(ncells))
return pd.DataFrame(np.matrix(rain_field),index=df.index)
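# Illustrative usage (paths are placeholders): ncells comes from the simulation basin
# whose binary rainfall file is being read.
# cu = wmf.SimuBasin(rute='basin.nc')
# fields = bin_to_df('rain_op/202103010000-202103010600-proj', cu.ncells)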
def get_radar_rain(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
mask=None,meanrain_ALL=True,path_masks_csv=None,complete_naninaccum=False,save_bin=False,
save_class = False,path_res=None,umbral=0.005,
verbose=True, zero_fill = None):
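    '''
    Reads radar .nc scans from rutaNC over [start, end] at time step Dt (seconds) and,
    depending on the flags: returns the mean radar rainfall for the masks listed in
    `codigos` (meanrain_ALL=True, requires path_masks_csv), accumulates the field over
    the period (accum=True), works over the whole radar extent (all_radextent=True) or
    over a shapefile / bounding-box mask (mask), and/or writes a .bin/.hdr rainfall
    series (a .nc when a mask is given) if save_bin=True and len(codigos)==1.
    The returned objects change accordingly (see the final block of this function).
    '''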
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de rutas y la de las fechas a las que corresponde cada ruta.
ListRutas.sort()
ListDatesinNC.sort()#con estas fechas se asignaran los barridos a cada timestep.
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
#Obtiene las posiciones de acuerdo al dt para cada fecha, si no hay barrido en ese paso de tiempo se acumula
#elbarrido inmediatamente anterior.
#Saca una lista con las pos de los barridos por cada timestep, y las pega en PosDates
#Si el limite de completar faltantes con barrido anterior es de 10 min, solo se completa si dt=300s
#limite de autocompletar : 10m es decir, solo repito un barrido.
PosDates = []
pos1 = []
pos_completed = []
lim_completed = 3 #ultimos 3 barridos - 15min
for ind,d1,d2 in zip(np.arange(datesDt[:-1].size),datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
# si no hay barridos en el dt de inicio sellena con zero - lista vacia
#y no esta en los primero 3 pasos : 15min.
# si se puede completar
# y si en el los lim_completed pasos atras no hubo más de lim_completed-1 pos con pos_completed=2, lim_completed-1 para que deje correr sólo hasta el lim_completed.
#asi solo se pueded completar y pos_completed=2 una sola vez.
if len(pos2) == 0 and ind not in np.arange(lim_completed) and complete_naninaccum == True and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 : #+1 porque coge los ultimos n-1 posiciones.
pos2 = pos1
pos_completed.append(2)
elif len(pos2) == 0:
pos2=[]
pos_completed.append(0)
else:
pos_completed.append(1)
#si se quiere completar y hay barridos en este dt, guarda esta pos para si es necesario completar las pos de dt en el sgte paso
if complete_naninaccum == True and len(pos2) != 0 and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 :
pos1 = pos2
else:
pos1 = []
PosDates.append(pos2)
# si se asigna, se agregas dates y PosDates para barridos en cero al final.
if zero_fill is not None:
#se redefinen datesDt luego que los PosDates fueron asignados
final = (pd.to_datetime(final) + pd.Timedelta('%ss'%Dt*zero_fill)).strftime('%Y-%m-%d %H:%M')
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
# se agrega a PosDates pasos del futuro con barridos en cero, y se cambia end.
end = end + pd.Timedelta('%ss'%Dt*zero_fill) #pasos de tiempo:steps, independiente del Dt
for steps in np.arange(zero_fill): PosDates.append([])
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#mascara con shp a parte de wmf
if mask is not None:
#se abre un barrido para sacar la mascara
g = netCDF4.Dataset(ListRutas[PosDates[0][0]])
        field = g.variables['Rain'][:].T  # only the shape of this field is used below (mask construction)
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
g.close()
longs=np.array([RadProp[2]+0.5*RadProp[4]+i*RadProp[4] for i in range(RadProp[0])])
lats=np.array([RadProp[3]+0.5*RadProp[5]+i*RadProp[5] for i in range(RadProp[1])])
x,y = np.meshgrid(longs,lats)
#mask as a shp
if type(mask) == str:
#boundaries
shp = gpd.read_file(mask)
poly = shp.geometry.unary_union
shp_mask = np.zeros([len(lats),len(longs)])
for i in range(len(lats)):
for j in range(len(longs)):
if (poly.contains(Point(longs[j],lats[i])))==True:
shp_mask[i,j] = 1# Rain_mask es la mascara
l = x[shp_mask==1].min()
r = x[shp_mask==1].max()
d = y[shp_mask==1].min()
a = y[shp_mask==1].max()
#mask as a list with coordinates whithin the radar extent
elif type(mask) == list:
l = mask[0] ; r = mask[1] ; d = mask[2] ; a = mask[3]
x,y = x.T,y.T #aun tengo dudas con el recorte, si en nc queda en la misma pos que los lats,longs.
#boundaries position
x_wh,y_wh = np.where((x>l)&(x<r)&(y>d)&(y<a))
#se redefine sfield con size que corresponde
field = field[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
if save_bin and len(codigos)==1 and path_res is not None:
#open nc file
f = netCDF4.Dataset(path_res,'w', format='NETCDF4') #'w' stands for write
tempgrp = f.createGroup('rad_data') # as folder for saving files
lon = longs[np.unique(x_wh)[0]:np.unique(x_wh)[-1]]
lat = lats[np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
#set name and leght of dimensions
tempgrp.createDimension('lon', len(lon))
tempgrp.createDimension('lat', len(lat))
tempgrp.createDimension('time', None)
#building variables
longitude = tempgrp.createVariable('longitude', 'f4', 'lon')
latitude = tempgrp.createVariable('latitude', 'f4', 'lat')
rain = tempgrp.createVariable('rain', 'f4', (('time', 'lat', 'lon')))
time = tempgrp.createVariable('time', 'i4', 'time')
#adding globalattributes
f.description = "Radar rainfall dataset containing one group"
f.history = "Created " + dt.datetime.now().strftime("%d/%m/%y")
#Add local attributes to variable instances
longitude.units = 'degrees east - wgs4'
latitude.units = 'degrees north - wgs4'
time.units = 'minutes since 2020-01-01 00:00'
rain.units = 'mm/h'
#passing data into variables
# use proper indexing when passing values into the variables - just like you would a numpy array.
longitude[:] = lon #The "[:]" at the end of the variable instance is necessary
latitude[:] = lat
else:
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
#accumulated in basin
if accum:
if mask is not None:
rvec_accum = np.zeros(field.shape)
dfaccum = pd.DataFrame(index = rng) #este producto no da con mask.
else:
rvec_accum = np.zeros(cu.ncells)
# rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
#ITERA SOBRE LOS BARRIDOS DEL PERIODO Y SE SACAN PRODUCTOS
# print ListRutas
for ind,dates,pos in zip(np.arange(len(datesDt[1:])),datesDt[1:],PosDates):
#escoge como definir el size de rvec
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells, dtype = int)
rStra = np.zeros(cu.ncells, dtype = int)
try:
#se lee y agrega lluvia de los nc en el intervalo.
for c,p in enumerate(pos):
#lista archivo leido
if verbose:
print (ListRutas[p])
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(ListRutas[p])
rainfield = g.variables['Rain'][:].T/(((len(pos)*3600)/Dt)*1000.0)
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#if all extent
if all_radextent:
radmatrix += rainfield
#if mask
if mask is not None and type(mask) == str:
rvec += (rainfield*shp_mask)[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
elif mask is not None and type(mask) == list:
rvec += rainfield[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
# on WMF.
else:
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(rainfield,RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
                    rConv[rvec == 0] = 0
                    rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
#muletilla
path = 'bla'
except:
print ('error - no field found ')
path = ''
if accum:
if mask is not None:
                    rvec_accum += np.zeros(shape = field.shape)
rvec = np.zeros(shape = field.shape)
else:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
if mask is None: #esto para mask no sirve
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
#subbasins defined for WMF
if meanrain_ALL and mask is None:
mean = []
df_posmasks = pd.read_csv(path_masks_csv,index_col=0)
for codigo in codigos:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec*df_posmasks['%s'%codigo])/float(df_posmasks['%s'%codigo][df_posmasks['%s'%codigo]==1].size))
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
mean = []
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#si guarda nc de ese timestep guarda clasificados
if dentro == 0:
hagalo = True
else:
hagalo = False
#mira si guarda o no los clasificados
if save_class:
#Escribe el binario convectivo
aa = cuConv.rain_radar2basin_from_array(vec = rConv,
ruta_out = path_res+'_conv',
fecha = dates,
dt = Dt,
doit = hagalo)
#Escribe el binario estratiforme
aa = cuStra.rain_radar2basin_from_array(vec = rStra,
ruta_out = path_res+'_stra',
fecha = dates,
dt = Dt,
doit = hagalo)
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
elif mask is None and save_bin == True and len(codigos)==1 and path_res is None: #si es una cuenca pero no se quiere guardar binarios.
mean = []
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
#guardar .nc con info de recorte de radar: mask.
if mask is not None and save_bin and len(codigos)==1 and path_res is not None:
mean = []
#https://pyhogs.github.io/intro_netcdf4.html
rain[ind,:,:] = rvec.T
time[ind] = int((dates - pd.to_datetime('2010-01-01 00:00')).total_seconds()/60) #min desde 2010
if ind == np.arange(len(datesDt[1:]))[-1]:
f.close()
print ('.nc saved')
#guarda en df meanrainfall.
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec)/float(shp_mask[shp_mask==1].size))
#save
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
#Cierrra el binario y escribe encabezado
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
pass
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True and mask is not None:
return df,rvec_accum
elif accum == True and mask is None:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
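# Illustrative call (codes and paths are placeholders): mean radar rain over the masks
# listed in a pre-computed masks csv, at 5-minute resolution.
# df_rain = get_radar_rain('2021-03-01 00:00', '2021-03-01 06:00', 300., 'basin.nc',
#                          [99, 140], rutaNC='/path/to/radar_nc/',
#                          path_masks_csv='masks_pos.csv', verbose=False)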
def get_radar_rain_OP(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
verbose=True):
'''
    Reads .nc files from rutaNC (101Radar_Class) within the assigned period and frequency.
    For now it only works with one scan per timestep (operational 5-minute data).
0. It divides by 1000.0 and converts from mm/5min to mm/h.
1. Get mean radar rainfall in basins assigned in 'codigos' for finding masks, if the mask exist.
2. Write binary files if is setted.
- Cannot do both 1 and 2.
- To saving binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
len('codigos')=1, nc_path aims to the one with dxp and simubasin props setted.
Parameters
----------
start: string, date&time format %Y-%m%-d %H:%M, local time.
end: string, date&time format %Y-%m%-d %H:%M, local time.
Dt: float, timedelta in seconds. For this function it should be lower than 3600s (1h).
cuenca: string, simubasin .nc path with dxp and format from WMF. It should be 260 path if whole catchment analysis is needed, or any other .nc path for saving the binary file.
codigos: list, with codes of stage stations. Needed for finding the mask associated to a basin.
rutaNC: string, path with .nc files from radar meteorology group. Default in amazonas: 101Radar_Class
Optional Parameters
----------
accum: boolean, default False. True for getting the accumulated matrix between start and end.
Change returns: df,rvec (accumulated)
path_tif: string, path of tif to write accumlated basin map. Default None.
all_radextent:boolean, default False. True for getting the accumulated matrix between start and end in the
whole radar extent. Change returns: df,radmatrix.
    meanrain_ALL: boolean, default True. True for getting the mean radar rainfall within several basins whose masks are defined in 'codigos'.
save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall and if len('codigos')=1.
save_class: boolean,default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
path_res: string with path where to write results if save_bin=True, default None.
umbral: float. Minimum umbral for writing rainfall, default = 0.005.
Returns
----------
- df whith meanrainfall of assiged codes in 'codigos'.
- df,rvec if accum = True.
- df,radmatrix if all_radextent = True.
- save .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
'''
#### FECHAS Y ASIGNACIONES DE NC####
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de dias y de rutas
ListDatesinNC.sort()
ListRutas.sort()
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
#Obtiene las posiciones de acuerdo al dt para cada fecha, si no hay barrido en ese paso de tiempo se acumula
#elbarrido inmediatamente anterior.
PosDates = []
pos1 = [0]
for d1,d2 in zip(datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
if len(pos2) == 0 and complete_naninaccum == True: # si no hay barridos en el dt de inicio ellena con cero
pos2 = pos1
elif complete_naninaccum == True: #si hay barridos en este dt guarda esta pos para si es necesario completar las pos de dt en el sgte paso
pos1 = pos2
elif len(pos2) == 0:
pos2=[]
PosDates.append(pos2)
paths_inperiod = [[ListRutas[p] for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
pospaths_inperiod = [[p for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
######### LISTA EN ORDEN CON ARCHIVOS OBSERVADOS Y ESCENARIOS#############3
##### buscar el ultimo campo de lluvia observado ######
datessss = []
nc010 = []
for date,l_step,lpos_step in zip(datesDt[1:],paths_inperiod,pospaths_inperiod):
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
#siempre intenta buscar en cada paso de tiempo el observado, solo si no puede, busca escenarios futuros.
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
nc010.append(path)
datessss.append(date)
######punto a partir del cual usar escenarios
#si dentro del periodo existe alguno len(date)>1, sino = 0 (todo el periodo corresponde a forecast)
#si no existe pos_lastradarfield = pos del primer paso de tiempo paraque se cojan todos los archivos
if len(datessss)>0:
pos_lastradarfield = np.where(datesDt[1:]==datessss[-1])[0][0]
else:
pos_lastradarfield = 0
list_paths= []
# escoge rutas y pos organizados para escenarios, por ahora solo sirve con 1 barrido por timestep.
for ind,date,l_step,lpos_step in zip(np.arange(datesDt[1:].size),datesDt[1:],paths_inperiod,pospaths_inperiod):
# pos_step = []; paths_step = []
if len(l_step) == 0:
list_paths.append('')
else:
# ordenar rutas de ncs
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
# print (ind,path,pospath)
#si es un evento viejo
if evs_hist:
#primero escanarios futuros.
if include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios):
list_paths.append(path)
break
#despues observados.
elif path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
#si es rigth now
else:
#primero observados y para ahi si se lo encontro
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
break
#despues escenarios futuros, y solo despues que se acaban observados
elif include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios) and ind > pos_lastradarfield:
list_paths.append(path)
######### LECTURA DE CUENCA, DATOS Y GUARDADO DE BIN.###########
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#accumulated in basin
if accum:
rvec_accum = np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
#itera sobre ncs abre y guarda ifnfo
for dates,path in zip(datesDt[1:],list_paths):
if verbose:
print (dates,path)
rvec = np.zeros(cu.ncells)
if path != '': #sino hay archivo pone cero.
try:
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(path)
#if all extent
if all_radextent:
radmatrix += g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0)
#on basins --> wmf.
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0),RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
rConv[rvec == 0] = 0
rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
except:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
else:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
if meanrain_ALL:
mean = []
#para todas
for codigo in codigos:
if '%s.tif'%(codigo) in os.listdir('/media/nicolas/Home/nicolas/01_SIATA/info_operacional_cuencas_nivel/red_nivel/tif_mascaras/'):
mask_path = '/media/nicolas/Home/nicolas/01_SIATA/info_operacional_cuencas_nivel/red_nivel/tif_mascaras/%s.tif'%(codigo)
mask_map = wmf.read_map_raster(mask_path)
mask_vect = cu.Transform_Map2Basin(mask_map[0],mask_map[1])
else:
mask_vect = None
if mask_vect is not None:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
try:
mean.append(np.sum(mask_vect*rvec)/float(mask_vect[mask_vect==1].size))
except: # para las que no hay mascara.
mean.append(np.nan)
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if save_bin == True and len(codigos)==1 and path_res is not None:
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#guarda en df meanrainfall.
mean = []
if path != '':
mean.append(rvec.mean())
else:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if save_bin == True and len(codigos)==1 and path_res is not None:
#Cierrra el binario y escribe encabezado
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
def get_radar_rain_OP_newmasks(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
path_masks_csv = None,verbose=True):
'''
    Reads .nc files from rutaNC (101Radar_Class) within the assigned period and frequency.
    For now it only works with one scan per timestep (operational 5-minute data).
0. It divides by 1000.0 and converts from mm/5min to mm/h.
1. Get mean radar rainfall in basins assigned in 'codigos' for finding masks, if the mask exist.
2. Write binary files if is setted.
- Cannot do both 1 and 2.
- To saving binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
len('codigos')=1, nc_path aims to the one with dxp and simubasin props setted.
Parameters
----------
start: string, date&time format %Y-%m%-d %H:%M, local time.
end: string, date&time format %Y-%m%-d %H:%M, local time.
Dt: float, timedelta in seconds. For this function it should be lower than 3600s (1h).
cuenca: string, simubasin .nc path with dxp and format from WMF. It should be 260 path if whole catchment analysis is needed, or any other .nc path for saving the binary file.
codigos: list, with codes of stage stations. Needed for finding the mask associated to a basin.
rutaNC: string, path with .nc files from radar meteorology group. Default in amazonas: 101Radar_Class
Optional Parameters
----------
accum: boolean, default False. True for getting the accumulated matrix between start and end.
Change returns: df,rvec (accumulated)
path_tif: string, path of tif to write accumlated basin map. Default None.
all_radextent:boolean, default False. True for getting the accumulated matrix between start and end in the
whole radar extent. Change returns: df,radmatrix.
    meanrain_ALL: boolean, default True. True for getting the mean radar rainfall within several basins whose masks are defined in 'codigos'.
save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall and if len('codigos')=1.
save_class: boolean,default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
path_res: string with path where to write results if save_bin=True, default None.
umbral: float. Minimum umbral for writing rainfall, default = 0.005.
include_escenarios: string wth the name of scenarios to use for future.
path_masks_csv: string with path of csv with pos of masks, pos are related tu the shape of the simubasin designated.
Returns
----------
- df whith meanrainfall of assiged codes in 'codigos'.
- df,rvec if accum = True.
- df,radmatrix if all_radextent = True.
- save .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
'''
#### FECHAS Y ASIGNACIONES DE NC####
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de dias y de rutas
ListDatesinNC.sort()
ListRutas.sort()
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
#Obtiene las posiciones de acuerdo al dt para cada fecha, si no hay barrido en ese paso de tiempo se acumula
#elbarrido inmediatamente anterior.
PosDates = []
pos1 = [0]
for d1,d2 in zip(datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
if len(pos2) == 0 and complete_naninaccum == True: # si no hay barridos en el dt de inicio ellena con cero
pos2 = pos1
elif complete_naninaccum == True: #si hay barridos en este dt guarda esta pos para si es necesario completar las pos de dt en el sgte paso
pos1 = pos2
elif len(pos2) == 0:
pos2=[]
PosDates.append(pos2)
paths_inperiod = [[ListRutas[p] for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
pospaths_inperiod = [[p for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
######### LISTA EN ORDEN CON ARCHIVOS OBSERVADOS Y ESCENARIOS#############3
##### buscar el ultimo campo de lluvia observado ######
datessss = []
nc010 = []
for date,l_step,lpos_step in zip(datesDt[1:],paths_inperiod,pospaths_inperiod):
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
#siempre intenta buscar en cada paso de tiempo el observado, solo si no puede, busca escenarios futuros.
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
nc010.append(path)
datessss.append(date)
######punto a partir del cual usar escenarios
#si dentro del periodo existe alguno len(date)>1, sino = 0 (todo el periodo corresponde a forecast)
#si no existe pos_lastradarfield = pos del primer paso de tiempo paraque se cojan todos los archivos
if len(datessss)>0:
pos_lastradarfield = np.where(datesDt[1:]==datessss[-1])[0][0]
else:
pos_lastradarfield = 0
list_paths= []
# escoge rutas y pos organizados para escenarios, por ahora solo sirve con 1 barrido por timestep.
for ind,date,l_step,lpos_step in zip(np.arange(datesDt[1:].size),datesDt[1:],paths_inperiod,pospaths_inperiod):
# pos_step = []; paths_step = []
if len(l_step) == 0:
list_paths.append('')
else:
# ordenar rutas de ncs
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
# print (ind,path,pospath)
#si es un evento viejo
if evs_hist:
#primero escanarios futuros.
if include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios):
list_paths.append(path)
break
#despues observados.
elif path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
#si es rigth now
else:
#primero observados y para ahi si se lo encontro
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
break
#despues escenarios futuros, y solo despues que se acaban observados
elif include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios) and ind > pos_lastradarfield:
list_paths.append(path)
######### LECTURA DE CUENCA, DATOS Y GUARDADO DE BIN.###########
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#accumulated in basin
if accum:
rvec_accum = np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
#itera sobre ncs abre y guarda ifnfo
for dates,path in zip(datesDt[1:],list_paths):
if verbose:
print (dates,path)
rvec = np.zeros(cu.ncells)
if path != '': #sino hay archivo pone cero.
try:
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(path)
#if all extent
if all_radextent:
radmatrix += g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0)
#on basins --> wmf.
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0),RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
rConv[rvec == 0] = 0
rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
except:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
else:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
if meanrain_ALL:
mean = []
#para todas
df_posmasks = pd.read_csv(path_masks_csv,index_col=0)
for codigo in codigos:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
try:
mean.append(np.sum(rvec*df_posmasks[codigo])/float(df_posmasks[codigo][df_posmasks[codigo]==1].size))
except: # para las que no hay mascara.
mean.append(np.nan)
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if save_bin == True and len(codigos)==1 and path_res is not None:
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#guarda en df meanrainfall.
mean = []
if path != '':
mean.append(rvec.mean())
else:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if save_bin == True and len(codigos)==1 and path_res is not None:
#Cierrra el binario y escribe encabezado
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
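# Usage sketch for get_radar_rain (illustrative; keyword values other than the
# ones used by get_rainfall2sim below are assumptions, not a definitive API):
#
#   # mean rainfall per basin mask plus the accumulated field over the basin
#   df, rvec_accum, dfaccum = get_radar_rain(start, end, Dt, path_ncbasin, codigos,
#                                            rutaNC=ruta_radar, accum=True,
#                                            meanrain_ALL=True,
#                                            path_masks_csv=path_masks_csv)
#   # default: only the mean-rainfall DataFrame indexed by date
#   df = get_radar_rain(start, end, Dt, path_ncbasin, codigos, rutaNC=ruta_radar)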
def get_rainfall2sim(ConfigList,cu,path_ncbasin,starts_m,end, #se corre el bin mas largo.
Dt= float(wmf.models.dt),include_escenarios=None,
evs_hist= False,
check_file=True,stepback_start = '%ss'%int(wmf.models.dt *1),
complete_naninaccum=True,verbose=False,zero_fill=None):
    #generate or read the rainfall binaries
start,end = starts_m[-1],end
    start,end = (pd.to_datetime(start)- pd.Timedelta(stepback_start)),pd.to_datetime(end) #start always takes 1 stepback because the rainfall-generation code starts at step 1, not step 0.
    #read the paths
codefile = get_ruta(ConfigList,'name_proj')
rain_path = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_rain')
ruta_rain = '%s%s_%s-%s'%(rain_path,start.strftime('%Y%m%d%H%M'),end.strftime('%Y%m%d%H%M'),codefile)
ruta_out_rain = '%s%s_%s-%s.bin'%(rain_path,start.strftime('%Y%m%d%H%M'),end.strftime('%Y%m%d%H%M'),codefile)
#set model dt
set_modelsettings(ConfigList)
if check_file:
file = os.path.isfile(ruta_rain+'.bin')
else:
file = False
    #if the binary already exists, open it and extract the df.
if file:
obj = hdr_to_series(ruta_rain+'.hdr')
obj = obj.loc[start:end]
    #if the binary does not exist
else:
if include_escenarios is not None:
# print ('include escenarios %s'%include_escenarios)
ruta_rain = ruta_out_rain.split('rain_op_py2')[0]+'rain_op_esc/rain_op_esc_%s'%include_escenarios
codigos=[codefile]
print ('WARNING: converting rain data, it may take a while ---- dt:%s'%(Dt))
# obj = get_radar_rain_OP(start,end,Dt,nc_basin,codigos,
# meanrain_ALL=False,save_bin=True,
# path_res=ruta_out_rain,
# umbral=0.005,verbose=verbose,
# evs_hist= evs_hist,complete_naninaccum = complete_naninaccum,
# include_escenarios = include_escenarios)
obj = get_radar_rain(start,end,Dt,path_ncbasin,codigos,rutaNC=get_ruta(ConfigList,'ruta_radardbz'),
meanrain_ALL=False,save_bin=True,
path_res=ruta_rain,
umbral=0.005,verbose=verbose,
complete_naninaccum = complete_naninaccum,
zero_fill=zero_fill)
obj = obj.loc[start:end]
return obj, ruta_out_rain
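# Usage sketch (hedged): ConfigList, cu and starts_m are assumed to be built by
# the calling script, as elsewhere in this module.
#
#   rain_series, ruta_out_rain = get_rainfall2sim(ConfigList, cu, path_ncbasin,
#                                                 starts_m, end)
#
# rain_series is the mean-rainfall record read from the .hdr, already sliced to
# [start - 1*dt, end]; ruta_out_rain is the .bin path used by the execution lists.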
#-----------------------------------
#-----------------------------------
#Funciones de ejecucion modelo
#-----------------------------------
#-----------------------------------
def get_executionlists_all4all(ConfigList,ruta_out_rain,cu,starts_m,end,windows,warming_steps=48,dateformat_starts = '%Y-%m-%d %H:%M'):
# pars + ci's
DicCI=get_ConfigLines(ConfigList,'-CIr','Paths')
#pars by ci.
DicPars = {}
for name in list(DicCI.keys()):
DicPars.update({'%s'%name:get_ConfigLines(ConfigList,'-par%s'%name,'Pars')})
    #output paths (from the config file)
ruta_StoOp = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_sto_op')
ruta_QsimOp = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_qsim_op')
ruta_QsimH = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_qsim_hist')
ruta_MS_H = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_MS_hist')
pm = wmf.read_mean_rain(ruta_out_rain.split('.')[0]+'.hdr')
    #Prepare the lists that set up each run configuration
ListEjecs = []
for window,start_m in zip(windows,starts_m):
pos_start = pm.index.get_loc(start_m.strftime(dateformat_starts))
npasos = int((end-start_m).total_seconds()/wmf.models.dt)
STARTid = window #;print STARTid
#CIs
for CIid in np.sort(list(DicCI.keys())):
with open(DicCI[CIid], 'r') as f:
CI_dic = json.load(f)
#pars
for PARid in DicPars[CIid]:
ListEjecs.append([cu, CIid, CI_dic, ruta_out_rain, PARid, DicPars[CIid][PARid], npasos, pos_start, STARTid, ruta_StoOp+PARid+CIid+'-'+STARTid, ruta_QsimOp+PARid+CIid+'-'+STARTid+'.csv', ruta_QsimH+PARid+CIid+'-'+STARTid+'.csv', ruta_MS_H+PARid+CIid+'-'+STARTid+'.csv',warming_steps])
return ListEjecs
def get_executionlists_fromdf(ConfigList,ruta_out_rain,cu,starts_m,end,df_executionprops,
df_xy_estH = None,
warming_steps=48,dateformat_starts = '%Y-%m-%d %H:%M',
path_pant4rules = None,fecha_binsto = None):
    #output paths (from the config file)
ruta_QsimOp = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_qsim_op')
ruta_QsimH = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_qsim_hist')
ruta_StoOp = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_sto_op')
ruta_MS_H = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_MS_hist')
ruta_HSsim_ests_op = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_HSsim_ests_op')
ruta_HSsim_ests_hist = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_HSsim_ests_hist')
pm = wmf.read_mean_rain(ruta_out_rain.split('.')[0]+'.hdr')
    #Prepare the lists that set up each run configuration
ListEjecs = []
for STARTid,start_m,CIpath,CIid,PARvar,PARid in zip(df_executionprops.start_names.values,starts_m,df_executionprops.CIs.values,df_executionprops.CI_names.values,df_executionprops.pars.values,df_executionprops.pars_names.values):
        #define the simulation start and number of steps
pos_start = pm.index.get_loc(start_m.strftime(dateformat_starts))
npasos = int((end-start_m).total_seconds()/float(wmf.models.dt))+1 # para que cuadren los pasos del index entre lluvia y qsim
        #define the initial conditions of the run according to their type
        if CIpath == 0.0: #set the CI to zeros
CIvar = [0]*5
        elif CIpath[-5:] == '.json': #if a .json path is given, read the CI from it
with open(CIpath, 'r') as f:
CIvar = json.load(f)
CIvar = list(CIvar.values())
        elif CIpath == 'reglas_pant' and path_pant4rules is not None: #if set, read the r_rules CI scenarios
            #read the CI paths.
paths_reglaspant = np.sort(glob.glob(get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_CI_reglaspant')+'*'))
            #read the antecedent rainfall of the corresponding window
pant = wmf.read_mean_rain(path_pant4rules.split('.')[0]+'.hdr')
tws_tanks = [pd.Timedelta(tw) for tw in ['11d','13d','21d','30d','17d']]
pants_sum = [pant.loc[pant.index[-1]- tw_pant:pant.index[-1]].sum() for tw_pant in tws_tanks]
            #choose the CI of each tank as a function of rainfall: rain_CIrules.
CIvar = []
for index,pant_sum,path_ci in zip(np.arange(1,6),pants_sum,paths_reglaspant):
                df_tank = pd.read_csv(path_ci,index_col=0)
                if index == 4: #t4, groundwater tank
                    CIvar.append(df_tank.loc[end.month][0]) # not chosen by rainfall bins because ndias never converges; for now the monthly mean (annual cycle) is used.
else:
pos_bin = np.searchsorted(df_tank.index,pant_sum) - 1
CIvar.append(df_tank.loc[df_tank.index[pos_bin]].P50)
        elif CIpath[-7:] == '.StOhdr': #if a .StOhdr is given, read the CI from its binary
f=open(CIpath)
filelines=f.readlines()
f.close()
IDs=np.array([int(i.split(',')[0]) for i in filelines[5:]])
fechas=np.array([i.split(',')[-1].split(' ')[1] for i in filelines[5:]])
chosen_id = [IDs[-1] if fecha_binsto is None else IDs[np.where(fechas == fecha_binsto.strftime('%Y-%m-%d-%H:%M'))[0][0]]][0]
            #read the chosen record from the .StObin
CIvar,r = wmf.models.read_float_basin_ncol(CIpath.split('.')[0]+'.StObin',chosen_id, cu.ncells, 5)
        #Store the execution lists
ListEjecs.append([cu, CIid, CIvar, ruta_out_rain, PARid, PARvar, npasos, pos_start, STARTid,
ruta_StoOp+PARid+'-'+CIid+'-'+STARTid, ruta_QsimOp+PARid+'-'+CIid+'-'+STARTid+'.csv',
ruta_QsimH+PARid+'-'+CIid+'-'+STARTid+'.csv',ruta_MS_H+PARid+'-'+CIid+'-'+STARTid+'.csv',
warming_steps,
ruta_HSsim_ests_op+PARid+'-'+CIid+'-'+STARTid+'.csv',
ruta_HSsim_ests_hist+PARid+'-'+CIid+'-'+STARTid+'.csv',
df_xy_estH])
return ListEjecs
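# Layout of each entry L of ListEjecs as built by get_executionlists_fromdf
# above; get_qsim below indexes these positions directly:
#   L[0]  cu (wmf basin)        L[9]   storage output prefix (.StObin/.StOhdr)
#   L[1]  CI name               L[10]  operational Qsim .csv
#   L[2]  CI values             L[11]  historical Qsim .csv
#   L[3]  rain .bin path        L[12]  historical mean-storage .csv
#   L[4]  parameter-set name    L[13]  warming_steps
#   L[5]  parameter values      L[14]  operational HSsim (stations) .csv
#   L[6]  npasos                L[15]  historical HSsim (stations) .csv
#   L[7]  pos_start             L[16]  df_xy_estH (or None)
#   L[8]  start-window name
# Note: the shorter lists built by get_executionlists_all4all stop at index 13.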
def get_qsim(ListEjecs,set_CI=True,save_hist=True,verbose = True):
    '''
    Notes:
        - The .StObin is not being saved yet.
        - Still pending: saving MS (soil moisture) at the positions of the humidity stations.
    '''
for L in ListEjecs:
#read nc_basin
cu=L[0]
#if assigned, set CI.
if set_CI:
cu.set_Storage(L[2][0], 0)
cu.set_Storage(L[2][1], 1)
cu.set_Storage(L[2][2], 2)
cu.set_Storage(L[2][3], 3)
cu.set_Storage(L[2][4], 4)
#run model
res = cu.run_shia(L[5],L[3],L[6],L[7],kinematicN=12,
ruta_storage=L[9]) # se guardan condiciones para la sgte corrida.
#save df_qsimresults
#operational qsim - without warming steps
df_qsim = res[1].loc[res[1].index[L[13]:]]
df_qsim.to_csv(L[10])
#save df_HSsimresults
if L[16] is not None:
#operational HSsim - without warming steps
f=open(L[9]+'.StOhdr')
filelines=f.readlines()
f.close()
IDs=np.array([int(i.split(',')[0]) for i in filelines[5:]])
fechas=np.array([i.split(',')[-1].split(' ')[1] for i in filelines[5:]])
df_HSsim = pd.DataFrame(index = pd.to_datetime(fechas), columns = L[16].index)
for index,ID in zip(pd.to_datetime(fechas),IDs):
v,r = wmf.models.read_float_basin_ncol(L[9]+'.StObin',ID, cu.ncells, 5)
v[2][v[2]> wmf.models.max_gravita[0]] = wmf.models.max_gravita[0][v[2]> wmf.models.max_gravita[0]] #sumideros?
df_HSsim.loc[index] = [wmf.cu.basin_extract_var_by_point(cu.structure,(((v[0]+v[2])/(wmf.models.max_capilar+wmf.models.max_gravita))*100),L[16].values[i_esth],3,1,cu.ncells)[0] for i_esth in range(L[16].shape[0])]
df_HSsim.loc[df_HSsim.index[L[13]:]].to_csv(L[14])
# saving historical data
if save_hist == False:#se crea
# qsim
df_qsim.to_csv(L[11])
# hs_sim_ests
df_HSsim.to_csv(L[15])
# hs_sim
df_hs = pd.read_csv(L[9]+'.StOhdr', header = 4, index_col = 5, parse_dates = True, usecols=(1,2,3,4,5,6))
df_hs.columns = ['t1','t2','t3','t4','t5']
df_hs.index.name = 'fecha'
df_hs.to_csv(L[12])
else:
# qsim_hist
df_qsim0 = pd.read_csv(L[11], index_col=0, parse_dates= True) #abre archivo hist ya creado (con una corrida guardada.)
df_qsim0.index = pd.to_datetime(df_qsim0.index)
df_qsim.index = pd.to_datetime(df_qsim.index)
df_qsim.columns = list(map(str,df_qsim.columns))
df_qsim0= df_qsim0.append(df_qsim)#se agrega corrida actual
df_qsim0 = df_qsim0.reset_index().drop_duplicates(subset='index',keep='last').set_index('index')
df_qsim0 = df_qsim0.dropna(how='all')
df_qsim0 = df_qsim0.sort_index()
df_qsim0.to_csv(L[11]) # se guarda archivo hist. actualizado
# MSsim_hist
df_hs0 = pd.read_csv(L[12], index_col=0, parse_dates= True) #abre archivo hist ya creado (con una corrida guardada.)
df_hs0.index = pd.to_datetime(df_hs0.index)
df_hs = pd.read_csv(L[9].split('.')[0]+'.StOhdr', header = 4, index_col = 5, parse_dates = True, usecols=(1,2,3,4,5,6))#la nueva
df_hs.columns = ['t1','t2','t3','t4','t5']
df_hs.index.name = 'fecha'
df_hs.index = pd.to_datetime(df_hs.index)
df_hs.columns = list(map(str,df_hs.columns))
df_hs0= df_hs0.append(df_hs)#se agrega corrida actual
df_hs0 = df_hs0.reset_index().drop_duplicates(subset='fecha',keep='last').set_index('fecha')
df_hs0 = df_hs0.dropna(how='all')
df_hs0 = df_hs0.sort_index()
df_hs0.to_csv(L[12]) # se guarda archivo hist. actualizado
# HSsim_ests_hist
if L[16] is not None:
df_HSsim0 = pd.read_csv(L[15], index_col=0, parse_dates= True) #abre archivo hist ya creado (con una corrida guardada.)
df_HSsim0.index = pd.to_datetime(df_HSsim0.index)
df_HSsim.index = pd.to_datetime(df_HSsim.index)
df_HSsim.columns = list(map(str,df_HSsim.columns))
df_HSsim0= df_HSsim0.append(df_HSsim)#se agrega corrida actual
df_HSsim0 = df_HSsim0.reset_index().drop_duplicates(subset='index',keep='last').set_index('index')
df_HSsim0 = df_HSsim0.dropna(how='all')
df_HSsim0 = df_HSsim0.sort_index()
df_HSsim0.to_csv(L[15]) # se guarda archivo hist. actualizado
if verbose:
            print ('Config. '+L[4]+L[1]+'-'+L[-6]+' executed')
return res
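# Usage sketch (hedged): run every configured simulation and update the
# historical files; ListEjecs comes from one of the builder functions above.
#
#   ListEjecs = get_executionlists_fromdf(ConfigList, ruta_out_rain, cu, starts_m,
#                                         end, df_executionprops, df_xy_estH=df_xy_estH)
#   res = get_qsim(ListEjecs, set_CI=True, save_hist=True)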
#------------------------------------------------------------------------
#Functions used for plotting and for generating geoportal products
#------------------------------------------------------------------------
def get_pradar_withinnc(path_r,cu,start,end,Dt,ests,path_masks_csv=None,df_points = None):
    # open the binary and its records to iterate over them
pstruct = wmf.read_rain_struct(path_r)
    pstruct = pstruct.loc[start:end]#drop the warm-up period
df_pbasins = pd.DataFrame(index=pstruct.index,columns=ests)
path_bin = path_r.split('.')[0]+'.bin'
records = pstruct[' Record'].values
if path_masks_csv is not None:
df_posmasks = pd.read_csv(path_masks_csv, index_col=0)
for index,record in zip(pstruct.index,records):
v,r = wmf.models.read_int_basin(path_bin,record,cu.ncells)
rvec = v/1000.
#para subcuencas
if path_masks_csv is not None: # se actualiza la media de todas las mascaras en el df.
mean = []
for est in ests: mean.append(np.sum(rvec*df_posmasks['%s'%est])/float(df_posmasks['%s'%est][df_posmasks['%s'%est]==1].size))
df_pbasins.loc[index]=mean
elif df_points is not None:
df_pbasins.loc[index] = [wmf.cu.basin_extract_var_by_point(cu.structure,rvec,df_points.loc[ests].values[i_esth],3,1,cu.ncells)[0] for i_esth in range(df_points.loc[ests].shape[0])]
return df_pbasins
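# Usage sketch (hedged): mean radar rainfall over each station mask, read back
# from the rain binary written earlier; path_r points to the .hdr file.
#
#   df_pbasins = get_pradar_withinnc(path_r, cu, start, end, Dt, ests,
#                                    path_masks_csv=path_masks_csv)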
def N2Q(level,a,b):
Q = a*(level**b)
return Q
def Q2N(caudal,a,b):
N = (caudal/a)**(1/b)
return N
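# Minimal, self-contained sketch of the rating-curve helpers above; a and b are
# made-up coefficients (in practice they are calibrated per station).
def _rating_curve_example():
    a, b = 10.0, 1.5
    q = N2Q(2.0, a, b)   # 10 * 2.0**1.5 ~= 28.3 m3/s
    n = Q2N(q, a, b)     # inverse transform recovers the original level ~2.0 m
    return q, n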
def consulta_nyqobs(ests,start,end,server,user,passwd,dbname,Dt,df_est_metadatos,ruta_qobs,ruta_nobs,save_hist=True):
#consulta nivel
res = hidrologia.nivel.consulta_nivel(list(map(str,ests)),start,end,server,user,passwd,dbname,
retorna_niveles_riesgo=False)
#cuadrar index y type
levels = res.apply(pd.to_numeric, errors='coerce')
levels.index = pd.to_datetime(levels.index)
levels = levels[~levels.index.duplicated(keep='first')]
rng = pd.date_range(levels.index[0],levels.index[-1],freq='1T')
df_nobs = levels.reindex(rng)
df_nobs = df_nobs.resample(Dt).mean()
#caudal
df_qobs = pd.DataFrame(index=df_nobs.index)
for est,l in zip(ests,df_nobs.T.values):
df_qobs[est]= N2Q(l,df_est_metadatos.loc[est].a,df_est_metadatos.loc[est].b)
# saving historical data
if save_hist == False:#se crea
df_qobs.to_csv(ruta_qobs)
df_nobs.to_csv(ruta_nobs)
else:
#qobs
df_qobs0 = pd.read_csv(ruta_qobs, index_col=0, parse_dates= True) #abre archivo hist ya creado (con una corrida anterior)
df_qobs0.index = pd.to_datetime(df_qobs0.index)
df_qobs.index = pd.to_datetime(df_qobs.index)
df_qobs0= df_qobs0.append(df_qobs)#se agrega consulta actual
df_qobs0 = df_qobs0.reset_index().drop_duplicates(subset='index',keep='last').set_index('index')
df_qobs0.to_csv(ruta_qobs) # se guarda archivo hist. actualizado
#nobs
df_nobs0 = pd.read_csv(ruta_nobs, index_col=0, parse_dates= True) #abre archivo hist ya creado (con una corrida anterior)
        df_nobs0.index = pd.to_datetime(df_nobs0.index)
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import glob,os
from glob import iglob
#import scanpy as sc
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import RocCurveDisplay
from sklearn.datasets import load_wine
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
import joblib
import time
import random
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
# # SLE data for machine learning
# In[55]:
### training data import
sle_bulk=pd.read_csv('../RNA_seq_for_autoimmune_disease/SLE_bulk/GSE72509_SLE_RPKMs.txt.gz',sep='\t')
sle_bulk=sle_bulk.set_index('SYMBOL')
hd1=pd.read_csv('../RNA_seq_for_autoimmune_disease/health_bulk/GSE183204_HC_fpkm.csv',sep=',',index_col=0)
# In[56]:
### feature import
features=pd.read_csv('../script4paper2/combined_gene_for_machine_learning.csv',index_col=1)
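# Hedged sketch of how the pieces above are typically combined; the gene
# intersection and label construction are assumptions, not the original
# pipeline of this script:
#
#   genes = features.index.intersection(sle_bulk.index).intersection(hd1.index)
#   X = pd.concat([sle_bulk.loc[genes].T, hd1.loc[genes].T])
#   y = np.array([1] * sle_bulk.shape[1] + [0] * hd1.shape[1])  # 1 = SLE, 0 = healthy
#   X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
#                                                       test_size=0.3, random_state=0)
#   clf = RandomForestClassifier(n_estimators=500, random_state=0).fit(X_train, y_train)
#   print(roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]))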
# -*- coding: utf-8 -*-
"""
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
import os
import nose
import pandas.util.testing as tm
from pandas import DataFrame
from pandas import compat
from pandas.io.parsers import read_csv, read_table
class TestUrlGz(tm.TestCase):
def setUp(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table.csv')
self.local_table = read_table(localtable)
@tm.network
def test_url_gz(self):
url = ('https://raw.github.com/pandas-dev/pandas/'
'master/pandas/io/tests/parser/data/salary.table.gz')
url_table = read_table(url, compression="gzip", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
@tm.network
def test_url_gz_infer(self):
url = 'https://s3.amazonaws.com/pandas-test/salary.table.gz'
url_table = read_table(url, compression="infer", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
class TestS3(tm.TestCase):
def setUp(self):
try:
import boto # noqa
except ImportError:
raise nose.SkipTest("boto not installed")
@tm.network
def test_parse_public_s3_bucket(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
if comp == 'bz2' and compat.PY2:
# The Python 2 C parser can't read bz2 from S3.
self.assertRaises(ValueError, read_csv,
's3://pandas-test/tips.csv' + ext,
compression=comp)
else:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
# Read public file from bucket with not-public contents
df = read_csv('s3://cant_get_it/tips.csv')
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df)
@tm.network
def test_parse_public_s3n_bucket(self):
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@tm.network
def test_parse_public_s3a_bucket(self):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@tm.network
def test_parse_public_s3_bucket_nrows(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
if comp == 'bz2' and compat.PY2:
# The Python 2 C parser can't read bz2 from S3.
self.assertRaises(ValueError, read_csv,
's3://pandas-test/tips.csv' + ext,
compression=comp)
else:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@tm.network
def test_parse_public_s3_bucket_chunked(self):
# Read with a chunksize
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
if comp == 'bz2' and compat.PY2:
# The Python 2 C parser can't read bz2 from S3.
self.assertRaises(ValueError, read_csv,
's3://pandas-test/tips.csv' + ext,
compression=comp)
else:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp)
self.assertEqual(df_reader.chunksize, chunksize)
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
true_df = local_tips.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
@tm.network
def test_parse_public_s3_bucket_chunked_python(self):
# Read with a chunksize using the Python parser
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp,
engine='python')
self.assertEqual(df_reader.chunksize, chunksize)
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them properly.
df = df_reader.get_chunk()
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
true_df = local_tips.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
@tm.network
def test_parse_public_s3_bucket_python(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
self.assertTrue(isinstance(df, DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(read_csv(
                tm.get_data_path('tips.csv')), df)
import pandas as pd
import numpy as np
from tqdm import tqdm
from dateutil.relativedelta import relativedelta
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.multioutput import RegressorChain
from sklearn.metrics import fbeta_score, mean_squared_error, r2_score
from sklearn.preprocessing import StandardScaler
from vespid.data.neo4j_tools import Nodes, Relationships
from vespid.models.static_communities import get_cluster_data
from vespid.models.static_communities import jaccard_coefficient
from vespid.models.static_communities import cosine_similarities
from vespid import setup_logger
logger = setup_logger(__name__)
class DynamicCommunities():
'''
Class designed to track, over an entire dynamic graph,
the birth, death, merging, splitting, or simple
continuation of dynamic communities.
'''
DEATH = 'death'
BIRTH = 'birth'
SPLIT = 'split'
MERGE = 'merge'
CONTINUATION = 'continuation'
CONTRACTION = 'contraction'
EXPANSION = 'expansion'
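    # Hedged usage sketch (illustrative values):
    #
    #   dc = DynamicCommunities(graph, start_year=2015, end_year=2019,
    #                           similarity_scoring='embeddings',
    #                           similarity_threshold=0.95)
    #   clusters = dc.track_cluster_similarity()  # also fills dc.similarity_scores
    #   merges = dc.flag_merge_events(2016)       # events culminating in 2016
    #   splits = dc.flag_split_events(2016)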
def __init__(
self,
graph,
start_year,
end_year,
window_size=3,
similarity_threshold=0.95,
similarity_scoring='embeddings',
size_change_threshold=0.1,
expire_cycles=3
):
'''
Parameters
----------
graph: Neo4jConnectionHandler object. The graph of interest.
start_year: int. Indicates the beginning year from which data will
be pulled for community building.
end_year: int. Same as start year, but defines the end of the period
of interest. Inclusive.
window_size: int. Number of years to include in a single analytical frame.
Note that this is currently not being used.
similarity_threshold: float in range [0.01, 0.99].
Dictates the minimum similarity score required
between C_t and C_(t+1) to indicate connected
clusters. Note that recommended value for membership-type
scoring is 0.1. For embeddings-type scoring,
recommended value is 0.95.
similarity_scoring: str. Can be one of ['embeddings', 'membership'].
Indicates what you want to compare in order to detect cluster
evolution events.
'embeddings': Use BERT/SPECTER/GraphSAGE/whatever vector
embeddings of the nodes to assess their similarity scores.
The actual similarity mechanism to be used is cosine similarity.
This is most directly useful when you need the embeddings to
make the cluster comparisons over time stateful, e.g.
if your static clustering approach is based on a graph
at a fixed time window `t` such that nodes
that existed before that window began aren't included
in the solution.
'membership': Use actual membership (e.g. unique node IDs) vectors
of each cluster to assess changes. Uses Jaccard similarity as
the metric. This really only works when the static clustering
solutions come from a cumulative graph in which the graph at
time `t` is the result of all graph information prior to `t`.
size_change_threshold: float in range [0.01, 0.99].
Dictates the minimum change in size of a cluster
from t to t+1 to indicate that it has expanded
or contracted.
expire_cycles: int. Number of timesteps a cluster
should be missing from the timeline before declaring
it dead.
'''
self.graph = graph
self.start_year = start_year
self.end_year = end_year
self.window_size = window_size
if similarity_scoring not in ['embeddings', 'membership']:
            raise ValueError(f"`similarity_scoring` received an invalid value of '{similarity_scoring}'")
self.similarity_threshold = similarity_threshold
self.similarity_scoring = similarity_scoring
self.size_change_threshold = size_change_threshold
self.expire_cycles = expire_cycles
self._c_t1_column = 'C_t'
self._c_t2_column = 'C_(t+1)'
self._event_column = 'event_type'
def __repr__(self):
output = {k:v for k,v in self.__dict__.items() if k not in ['graph', 'jaccard_scores'] and k[0] != '_'}
return str(output)
def clusters_over_time(self):
'''
Gets the cluster labels associated with each year of the
graph and maps them to the list of papers in that cluster
for that year, generating useful cluster-level metadata
along the way.
Parameters
----------
None.
Returns
-------
pandas DataFrame describing each cluster found in each time window
(e.g. cluster0_2016).
'''
dfs = []
#TODO: change queries below if we stop putting the year in the cluster ID attribute name
for year in tqdm(range(self.start_year, self.end_year + 1),
desc='Pulling down year-by-year data from Neo4j'):
if self.similarity_scoring == 'membership':
query = f"""
MATCH (p:Publication)
WHERE p.clusterID IS NOT NULL
AND p.publicationDate.year = {year}
RETURN toInteger(p.clusterID) AS ClusterLabel,
{year} AS Year,
COUNT(p) AS ClusterSize,
COLLECT(ID(p)) AS Papers
ORDER BY ClusterLabel ASC
"""
dfs.append(self.graph.cypher_query_to_dataframe(query,
verbose=False))
elif self.similarity_scoring == 'embeddings':
dfs.append(get_cluster_data(year, self.graph))
else:
                raise ValueError(f"`similarity_scoring` received an invalid value of '{self.similarity_scoring}'")
output = pd.concat(dfs, ignore_index=True)
return output
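    # Shape of the result (illustrative): with similarity_scoring='membership'
    # the columns are [ClusterLabel, Year, ClusterSize, Papers]; with
    # 'embeddings' the frame is whatever get_cluster_data() returns, which the
    # downstream similarity code expects to include a 'ClusterEmbedding' column.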
def track_cluster_similarity(self, clusters_over_time=None):
'''
Computes the Jaccard coefficient for consecutive year
pairs (e.g. 2017-2018) pairwise between each year's clusters
(e.g. cluster0_2017 compared to cluster1_2018) to determine
how similar each cluster in year t is those in year t+1.
Parameters
----------
clusters_over_time: pandas DataFrame that is equivalent
to the output of DynamicCommunities.clusters_over_time().
If not None, this will be used as the pre-computed result
of running that method. If None, clusters_over_time() will
be run.
Useful for saving time if you already have the pre-computed
result in memory.
Returns
-------
pandas DataFrame that is the result of self.clusters_over_time().
Also, a dict of the form {integer_t+1_year: pandas DataFrame}, wherein
the DataFrame has rows representing each cluster in the year t
and columns representing each cluster in t+1, with the values
reflective the Jaccard coefficient of similarity between each
cluster pair is written to self.similarity_scores. Note that keys
are the t+1 year value, so output[2018] covers the 2017-2018
comparison.
'''
if clusters_over_time is None:
df_clusters_over_time = self.clusters_over_time()
else:
df_clusters_over_time = clusters_over_time
results = {}
#TODO: make this robust to start year > end year
#TODO: combine with to_dataframe functionality so we can loop only once
for year in tqdm(range(self.start_year, self.end_year),
desc='Calculating cluster similarity scores for each t/t+1 pair'):
# Setup DataFrames for t and t+1 that have all cluster labels in them
df_t1 = df_clusters_over_time[
df_clusters_over_time['Year'] == year
].set_index('ClusterLabel')
df_t2 = df_clusters_over_time[
df_clusters_over_time['Year'] == year + 1
].set_index('ClusterLabel')
if self.similarity_scoring == 'membership':
# This will produce np.nan for any cluster label not present in a given year
df_years = pd.DataFrame({
year: df_t1['Papers'],
year + 1: df_t2['Papers']
})
# shape is (max_cluster_num, max_cluster_num)
# Form of [[cluster0_year0 to cluster0_year1], [cluster1_year0 to cluster0_year1], [cluster2_year0 to cluster0_year1],
# [cluster0_year0 to cluster1_year1], [cluster1_year0 to cluster1_year1], [cluster2_year0 to cluster1_year1],
# [cluster0_year0 to cluster2_year1], [], []]
scores = np.full((df_years.shape[0], df_years.shape[0]), np.nan)
#TODO: make this more efficient by avoiding loops!
# Go through each C_t vs. C_(t+1) pair and calculate Jaccard coefficient
for i, papers_past in enumerate(df_years[year]):
# Check for nulls
if isinstance(papers_past, list):
for j, papers_current in enumerate(df_years[year + 1]):
if isinstance(papers_current, list):
scores[i][j] = jaccard_coefficient(papers_past, papers_current)
results[year + 1] = pd.DataFrame(
scores,
index=[f"cluster{i}_{year}" for i in range(scores.shape[1])],
columns=[f"cluster{i}_{year + 1}" for i in range(scores.shape[0])]
).dropna(how='all', axis=0).dropna(how='all', axis=1) # Drop past, then future, clusters that don't exist (all null)
elif self.similarity_scoring == 'embeddings':
#TODO: consider plugging this straight into scoring function if memory is tight
t1 = df_clusters_over_time.loc[
df_clusters_over_time['Year'] == year,
'ClusterEmbedding'
]
t2 = df_clusters_over_time.loc[
df_clusters_over_time['Year'] == year + 1,
'ClusterEmbedding'
]
scores = cosine_similarities(t1, t2)
results[year + 1] = pd.DataFrame(
scores,
index=[f"cluster{i}_{year}" for i in range(scores.shape[0])],
columns=[f"cluster{i}_{year + 1}" for i in range(scores.shape[1])]
)
self.similarity_scores = results
return df_clusters_over_time
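    # Hedged example: scores are keyed by the *later* year of each comparison,
    # so after track_cluster_similarity() the 2017 -> 2018 transition lives in
    #
    #   scores_2018 = self.similarity_scores[2018]
    #   scores_2018.loc['cluster0_2017', 'cluster3_2018']  # similarity of that pair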
def _format_events(self, events):
'''
Reformats events DataFrame to be consistent output
Parameters
----------
events : pandas DataFrame
Original output of any given flagging method
Returns
-------
pandas DataFrame
Formatted output with consistent column order, etc.
'''
return events[[
self._c_t1_column,
self._c_t2_column,
self._event_column
]]
def flag_merge_events(self, year):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to be the result of a merge event.
Parameters
----------
        year: int. The later year of the comparison; scores are read from
            self.similarity_scores[year], which covers the transition
            from year-1 to year.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
num_matching_past_clusters = (above_threshold_scores).sum(axis=0)
resulting_merged_clusters = num_matching_past_clusters[num_matching_past_clusters >= 2].index.tolist()
if np.any(num_matching_past_clusters > 1):
# For each column
merge_results = above_threshold_scores[resulting_merged_clusters]\
.apply(lambda column: [column[column].index.tolist()])\
.iloc[0].to_dict()
output = pd.DataFrame([merge_results])\
.transpose().reset_index(drop=False).rename(columns={
'index': self._c_t2_column,
0: self._c_t1_column
})
output[self._event_column] = self.MERGE
return self._format_events(output)
else:
return pd.DataFrame()
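    # Toy example (hedged): with similarity_threshold = 0.9 and scores
    #
    #                  cluster0_2018  cluster1_2018
    #   cluster0_2017          0.95           0.10
    #   cluster1_2017          0.92           0.05
    #
    # cluster0_2018 has two parents above threshold, so flag_merge_events(2018)
    # returns a single row: C_t = [cluster0_2017, cluster1_2017],
    # C_(t+1) = cluster0_2018, event_type = 'merge'.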
def flag_split_events(self, year):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to be the result of a split event.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
num_matching_current_clusters = (above_threshold_scores).sum(axis=1)
resulting_split_clusters = num_matching_current_clusters[num_matching_current_clusters >= 2].index.tolist()
if np.any(num_matching_current_clusters > 1):
# For each row AKA C_t cluster that qualified as being above threshold in 2+ cases,
# pull out the column names for the C_(t+1) clusters that are its children
merge_results = above_threshold_scores.loc[resulting_split_clusters]\
.apply(lambda row: row[row].index.tolist(),
axis=1).to_dict()
output = pd.DataFrame([merge_results])\
.transpose().reset_index(drop=False).rename(columns={
'index': self._c_t1_column,
0: self._c_t2_column
})
output[self._event_column] = self.SPLIT
return self._format_events(output)
else:
return pd.DataFrame()
def flag_birth_events(self, year):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to have been created for the first time in t+1.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
# The question: do any t+1 clusters have no t cluster they are similar to?
# Put in terms of the jaccard_scores DataFrame structure: any column for C_(t+1)
# that is all < similarity_threshold?
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
num_matching_current_clusters = (above_threshold_scores).sum(axis=0)
resulting_birth_clusters = num_matching_current_clusters[num_matching_current_clusters < 1].index.tolist()
if np.any(num_matching_current_clusters < 1):
output = pd.DataFrame({
self._c_t1_column: np.nan,
self._c_t2_column: resulting_birth_clusters,
self._event_column: self.BIRTH
})
return self._format_events(output)
else:
return pd.DataFrame()
def flag_death_events(self, year):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to have not continued in any form into t+1.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
# The question: do any t+1 clusters have no t cluster they are similar to?
# Put in terms of the jaccard_scores DataFrame structure: any column for C_(t+1)
# that is all < similarity_threshold?
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
num_matching_current_clusters = (above_threshold_scores).sum(axis=1)
resulting_dead_clusters = num_matching_current_clusters[num_matching_current_clusters < 1].index.tolist()
if np.any(num_matching_current_clusters < 1):
output = pd.DataFrame({
self._c_t1_column: resulting_dead_clusters,
self._c_t2_column: np.nan,
self._event_column: self.DEATH
})
return self._format_events(output)
else:
return pd.DataFrame()
def flag_continuity_events(self, year, cluster_metadata, other_events):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to have continued on as a single cluster into t+1,
but that have increased above the relative change
threshold.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
cluster_metadata: pandas DataFrame with columns
['ClusterLabel', 'Year', 'ClusterSize'].
other_events: pandas DataFrame of split/merge/etc.
events that can be used to determine what clusters
are left and thus likely continuity events.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
# Find clusters that qualify as very similar to one another
# Need to check that there's only one-to-one mapping from t to t+1
num_matching_t1_clusters = (above_threshold_scores).sum(axis=1)
num_matching_t2_clusters = (above_threshold_scores).sum(axis=0)
if np.any(num_matching_t1_clusters == 1) \
and np.any(num_matching_t2_clusters == 1) \
and not other_events.empty:
# There were other flagged events, so we need to skip them
# Expand cluster columns so we have 1:1 C_t to C_(t+1) mappings
events_expanded = other_events\
.explode(self._c_t1_column)\
.explode(self._c_t2_column)
# Drop any C_t that are part of another event already
num_matching_t1_clusters.drop(
labels=events_expanded[self._c_t1_column],
errors='ignore',
inplace=True
)
# No more events to investigate?
if num_matching_t1_clusters.empty:
                return pd.DataFrame()
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import requests
import utils
import pandas as pd
import datetime as dt
import numpy as np
from itertools import groupby
import time
class vacunacion:
def __init__(self,output,indicador):
self.output = output
self.indicador = indicador
self.my_files = {
'vacunacion_fabricante':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination-type.csv',
'vacunacion_region':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination.csv',
'vacunacion_edad':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-ages.csv',
'vacunacion_grupo':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-groups.csv',
}
self.path = '../input/Vacunacion'
def get_last(self):
        ## download the corresponding file
if self.indicador == 'fabricante':
print('Retrieving files')
print('vacunacion_fabricante')
r = requests.get(self.my_files['vacunacion_fabricante'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_fabricante' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'campana':
print('Retrieving files')
print('vacunacion_region')
r = requests.get(self.my_files['vacunacion_region'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_region' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'edad':
print('Retrieving files')
print('vacunacion_edad')
r = requests.get(self.my_files['vacunacion_edad'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_edad' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'caracteristicas_del_vacunado':
print('Retrieving files')
print('vacunacion_grupo')
r = requests.get(self.my_files['vacunacion_grupo'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_grupo' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
        ## read the corresponding file
if self.indicador == 'fabricante':
print('reading files')
print('vacunacion_fabricante')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_fabricante.csv')
elif self.indicador == 'campana':
print('reading files')
print('vacunacion_region')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_region.csv')
elif self.indicador == 'edad':
print('reading files')
print('vacunacion_edad')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_edad.csv')
elif self.indicador == 'caracteristicas_del_vacunado':
print('reading files')
print('vacunacion_grupo')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_grupo.csv')
elif self.indicador == 'vacunas_region':
print('reading files')
print('vacunacion por region por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna':
print('reading files')
print('vacunacion por comuna por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_region':
print('reading files')
print('vacunacion por region por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_sexo':
print('reading files')
print('vacunacion por sexo por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
print('vacunacion por sexo por edad y FECHA')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6_2.csv', sep=';', encoding='ISO-8859-1')
self.last_edad_fecha = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_prioridad':
print('reading files')
print('vacunacion por grupos prioritarios')
self.last_added = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8.csv', sep=';', encoding='ISO-8859-1')
# aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8_2.csv', sep=';', encoding='ISO-8859-1')
# self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna_edad':
print('reading files')
print('vacunacion por comuna por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_establecimiento':
print('reading files')
print('vacunacion por establecimiento')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante':
print('reading files')
print('vacunacion por fabricante y fecha')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante_edad':
print('reading files')
print('vacunacion por fabricante y edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
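    # Hedged usage sketch (the output prefix is illustrative):
    #
    #   v = vacunacion('../output/vacunacion_fabricante', 'fabricante')
    #   v.get_last()      # download/read the source csv into self.last_added
    #   v.last_to_csv()   # write <output>.csv, <output>_t.csv and <output>_std.csv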
def last_to_csv(self):
if self.indicador == 'fabricante':
            ## campaign by manufacturer
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
self.last_added.rename(columns={'Type': 'Fabricante'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Fabricante', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'campana':
            ## campaign by region
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
utils.regionName(self.last_added)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Region', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'edad':
            ## campaign by age group
self.last_added.rename(columns={'Dose': 'Dosis',
'Age':'Rango_etario'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Rango_etario', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'caracteristicas_del_vacunado':
            ## campaign by vaccinee characteristics
self.last_added.rename(columns={'Dose': 'Dosis',
'Group':'Grupo'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Grupo', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'vacunas_region':
self.last_added.rename(columns={'REGION_CORTO': 'Region',
'COD_COMUNA_FINAL': 'Comuna',
'FECHA_INMUNIZACION': 'Fecha',
'SUM_of_SUM_of_2aDOSIS': 'Segunda_comuna',
'SUM_of_SUM_of_1aDOSIS': 'Primera_comuna',
'SUM_of_SUM_of_ÚnicaDOSIS':'Unica_comuna',
'SUM_of_4_Dosis':'Cuarta_comuna',
'SUM_of_Refuerzo_DOSIS':'Refuerzo_comuna'}, inplace=True)
self.last_added = self.last_added.dropna(subset=['Fecha'])
self.last_added['Fecha'] = pd.to_datetime(self.last_added['Fecha'],format='%d/%m/%Y').dt.strftime("%Y-%m-%d")
self.last_added.sort_values(by=['Region','Fecha'], inplace=True)
utils.regionName(self.last_added)
regiones = pd.DataFrame(self.last_added['Region'].unique())
            #transform
            ## aggregate comunas into regional totals
self.last_added['Primera'] = self.last_added.groupby(['Region','Fecha'])['Primera_comuna'].transform('sum')
self.last_added['Segunda'] = self.last_added.groupby(['Region','Fecha'])['Segunda_comuna'].transform('sum')
self.last_added['Unica'] = self.last_added.groupby(['Region', 'Fecha'])['Unica_comuna'].transform('sum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Fecha'])['Refuerzo_comuna'].transform('sum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Fecha'])['Cuarta_comuna'].transform(
'sum')
self.last_added = self.last_added[['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta']]
self.last_added.drop_duplicates(inplace=True)
            ##fill in the missing dates for each region and build the national total
idx = pd.date_range(self.last_added['Fecha'].min(), self.last_added['Fecha'].max())
df = pd.DataFrame()
total = pd.DataFrame(columns=['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta'])
total = utils.fill_in_missing_dates(total, 'Fecha', 0, idx)
total["Region"] = total["Region"].replace({0: 'Total'})
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region = utils.fill_in_missing_dates(df_region,'Fecha',0,idx)
df_region["Region"] = df_region["Region"].replace({0:region})
total['Primera'] = df_region['Primera'] + total['Primera']
total['Segunda'] = df_region['Segunda'] + total['Segunda']
total['Unica'] = df_region['Unica'] + total['Unica']
total['Refuerzo'] = df_region['Refuerzo'] + total ['Refuerzo']
total['Cuarta'] = df_region['Cuarta'] + total['Cuarta']
df = df.append(df_region, ignore_index=True)
total = total.append(df,ignore_index=True)
total['Fecha'] = total['Fecha'].dt.strftime("%Y-%m-%d")
self.last_added = total
            ##accumulate the totals (cumulative sums per region)
self.last_added['Primera'] = pd.to_numeric(self.last_added['Primera'])
self.last_added['Segunda'] = pd.to_numeric(self.last_added['Segunda'])
self.last_added['Unica'] = pd.to_numeric(self.last_added['Unica'])
self.last_added['Refuerzo'] = pd.to_numeric(self.last_added['Refuerzo'])
self.last_added['Cuarta'] = pd.to_numeric(self.last_added['Cuarta'])
self.last_added['Primera'] = self.last_added.groupby(['Region'])['Primera'].transform('cumsum')
self.last_added['Segunda'] = self.last_added.groupby(['Region'])['Segunda'].transform('cumsum')
self.last_added['Unica'] = self.last_added.groupby(['Region'])['Unica'].transform('cumsum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region'])['Refuerzo'].transform('cumsum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region'])['Cuarta'].transform('cumsum')
self.last_added['Total'] = self.last_added.sum(numeric_only=True, axis=1)
            ##reshape into the input-file (wide) format
df = pd.DataFrame()
regiones = pd.DataFrame(self.last_added['Region'].unique())
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region.set_index('Fecha',inplace=True)
df_region = df_region[['Primera','Segunda','Unica','Refuerzo','Cuarta']].T
df_region.reset_index(drop=True, inplace=True)
df = df.append(df_region, ignore_index=True)
new_col = ['Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta',
'Primera', 'Segunda','Unica','Refuerzo','Cuarta']
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for region in regiones[0]:
col = [region,region,region,region,region]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Region', value=new_col)
self.last_added = df
identifiers = ['Region', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json',orient='values',force_ascii=False)
elif self.indicador == 'vacunas_edad_region':
self.last_added.rename(columns={'NOMBRE_REGION': 'Region',
'COD_COMUNA': 'Comuna',
'EDAD_ANOS': 'Edad',
'POBLACION':'Poblacion',
'2aDOSIS_RES': 'Segunda_comuna',
'1aDOSIS_RES': 'Primera_comuna',
'4aDOSIS':'Cuarta_comuna',
'Refuerzo_DOSIS':'Refuerzo_comuna',
'ÚnicaDOSIS':'Unica_comuna'}, inplace=True)
self.last_added.sort_values(by=['Region', 'Edad'], inplace=True)
utils.regionName(self.last_added)
regiones = pd.DataFrame(self.last_added['Region'].unique())
            # transform
            ## aggregate comunas into regional totals
self.last_added['Primera'] = self.last_added.groupby(['Region', 'Edad'])['Primera_comuna'].transform('sum')
self.last_added['Segunda'] = self.last_added.groupby(['Region', 'Edad'])['Segunda_comuna'].transform('sum')
self.last_added['Unica'] = self.last_added.groupby(['Region', 'Edad'])['Unica_comuna'].transform('sum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Edad'])['Refuerzo_comuna'].transform('sum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Edad'])['Cuarta_comuna'].transform('sum')
self.last_added['Poblacion'] = self.last_added.groupby(['Region','Edad'])['Poblacion'].transform('sum')
self.last_added = self.last_added[['Region', 'Edad', 'Poblacion','Primera', 'Segunda','Unica','Refuerzo','Cuarta']]
self.last_added.drop_duplicates(inplace=True)
            ##build the national total
df = pd.DataFrame()
total = pd.DataFrame(columns=['Region', 'Edad','Poblacion','Primera', 'Segunda','Unica','Refuerzo','Cuarta'])
total['Edad'] = list(range(15, 81))
total["Region"] = total["Region"].fillna('Total')
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region.reset_index(drop=True, inplace=True)
total['Primera'] = total.Primera.fillna(0) + df_region.Primera.fillna(0)
total['Segunda'] = total.Segunda.fillna(0) + df_region.Segunda.fillna(0)
total['Unica'] = total.Unica.fillna(0) + df_region.Unica.fillna(0)
total['Refuerzo'] = total.Refuerzo.fillna(0) + df_region.Refuerzo.fillna(0)
total['Cuarta'] = total.Cuarta.fillna(0) + df_region.Cuarta.fillna(0)
total['Poblacion'] = total.Poblacion.fillna(0) + df_region.Poblacion.fillna(0)
df = df.append(df_region, ignore_index=True)
total = total.append(df, ignore_index=True)
self.last_added = total
            ##reshape into the input-file (wide) format
df = pd.DataFrame()
regiones = pd.DataFrame(self.last_added['Region'].unique())
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region.set_index('Edad', inplace=True)
df_region = df_region[['Primera', 'Segunda','Unica','Refuerzo','Cuarta']].T
df_region.reset_index(drop=True, inplace=True)
df = df.append(df_region, ignore_index=True)
new_col = ['Primera', 'Segunda', 'Unica','Refuerzo','Cuarta','Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta',
'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta']
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for region in regiones[0]:
                col = [region, region, region, region, region]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Region', value=new_col)
self.last_added = df
identifiers = ['Region','Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Edad'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json',orient='values',force_ascii=False)
elif self.indicador == 'vacunas_edad_sexo':
            #By region, totals
self.last_added.rename(columns={'NOMBRE_REGION': 'Region',
'SEXO1': 'Sexo',
'EDAD_ANOS': 'Edad',
'POBLACION':'Poblacion',
'SUM_of_1aDOSIS': 'Primera',
'SUM_of_2aDOSIS': 'Segunda',
'SUM_of_ÚnicaDOSIS':'Unica',
'SUM_of_Refuerzo_DOSIS':'Refuerzo',
'SUM_of_4_Dosis':'Cuarta'}, inplace=True)
self.last_added.sort_values(by=['Sexo','Edad'], inplace=True)
self.last_added = self.last_added[['Sexo','Edad','Primera','Segunda','Unica','Refuerzo','Cuarta']]
sexo = pd.DataFrame(self.last_added['Sexo'].unique())
##crear total
df = pd.DataFrame()
for sex in sexo[0]:
total = pd.DataFrame(columns=['Sexo', 'Edad', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta'])
total['Edad'] = list(range(self.last_added.Edad.min(), self.last_added.Edad.max() + 1))
df_sex = self.last_added.loc[self.last_added['Sexo'] == sex]
df_sex.reset_index(drop=True, inplace=True)
df_sex.index = df_sex['Edad']
total.index = total['Edad']
total['Sexo'] = total.Sexo.fillna(sex)
total['Primera'] = total.Primera.fillna(0) + df_sex.Primera.fillna(0)
total['Segunda'] = total.Segunda.fillna(0) + df_sex.Segunda.fillna(0)
total['Unica'] = total.Unica.fillna(0) + df_sex.Unica.fillna(0)
total['Refuerzo'] = total.Refuerzo.fillna(0) + df_sex.Refuerzo.fillna(0)
total['Cuarta'] = total.Cuarta.fillna(0) + df_sex.Cuarta.fillna(0)
df = df.append(total, ignore_index=True)
self.last_added = df
##transformar en input
df = pd.DataFrame()
sexo = pd.DataFrame(self.last_added['Sexo'].unique())
for sex in sexo[0]:
df_sex = self.last_added.loc[self.last_added['Sexo'] == sex]
df_sex.set_index('Edad', inplace=True)
df_sex = df_sex[['Primera', 'Segunda','Unica','Refuerzo','Cuarta']].T
df_sex.reset_index(drop=True, inplace=True)
df = df.append(df_sex, ignore_index=True)
new_col = ['Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta']
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for sex in sexo[0]:
col = [sex, sex,sex]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Sexo', value=new_col)
self.last_added = df
identifiers = ['Sexo','Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Edad'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json', orient='values', force_ascii=False)
# Por fecha, totales
self.last_edad_fecha.rename(columns={'FECHA_INMUNIZACION': 'Fecha',
'EDAD_ANOS': 'Edad',
'SUM_of_1aDOSIS': 'Primera',
'SUM_of_2aDOSIS': 'Segunda',
'SUM_of_SUM_of_ÚnicaDOSIS': 'Unica',
'SUM_of_Refuerzo_DOSIS':'Refuerzo',
'SUM_of_4aDOSIS':'Cuarta'}, inplace=True)
self.last_edad_fecha['Fecha'] = pd.to_datetime(self.last_edad_fecha['Fecha'], format='%d/%m/%Y').dt.strftime("%Y-%m-%d")
self.last_edad_fecha.sort_values(by=['Fecha', 'Edad'], inplace=True)
self.last_edad_fecha.reset_index(drop=True,inplace=True)
self.last_edad_fecha.dropna(subset=['Fecha'],inplace=True)
columns_name = self.last_edad_fecha.columns.values
maxSE = self.last_edad_fecha[columns_name[0]].max()
minSE = self.last_edad_fecha[columns_name[0]].min()
#print(minSE, maxSE)
lenSE = (pd.to_datetime(maxSE) - pd.to_datetime(minSE)).days + 1
startdate = pd.to_datetime(minSE)
date_list = pd.date_range(startdate, periods=lenSE).tolist()
date_list = [dt.datetime.strftime(x, "%Y-%m-%d") for x in date_list]
#print(date_list)
self.last_edad_fecha['Total'] = self.last_edad_fecha['Primera'].fillna(0) + self.last_edad_fecha['Segunda'].fillna(0) + self.last_edad_fecha['Unica'].fillna(0) + self.last_edad_fecha['Refuerzo'].fillna(0) + self.last_edad_fecha['Cuarta'].fillna(0)
for k in [2, 3, 4,5,6,7]:
edades = self.last_edad_fecha[columns_name[1]].unique()
edades = edades[~np.isnan(edades)]
edades = np.sort(edades)
df = pd.DataFrame(np.zeros((len(edades), lenSE)))
df.insert(0, 'Edad', edades)
df.set_index('Edad',inplace=True)
dicts = {}
keys = range(lenSE)
for i in keys:
dicts[i] = date_list[i]
df.rename(columns=dicts, inplace=True)
for index, row in self.last_edad_fecha.iterrows():
df[row['Fecha']][row['Edad']] = row[k]
df.reset_index(inplace=True)
if k == 2:
name = '../output/producto78/vacunados_edad_fecha' + '_1eraDosis.csv'
df.to_csv(name, index=False)
dft = df.T
dft.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Edad']
variables = [x for x in df.columns if x not in identifiers]
outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Primera Dosis')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
if k == 3:
name = '../output/producto78/vacunados_edad_fecha' + '_2daDosis.csv'
df.to_csv(name, index=False)
dft = df.T
dft.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Edad']
variables = [x for x in df.columns if x not in identifiers]
outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Segunda Dosis')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
if k == 4:
name = '../output/producto78/vacunados_edad_fecha' + '_UnicaDosis.csv'
df.to_csv(name, index=False)
dft = df.T
dft.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Edad']
variables = [x for x in df.columns if x not in identifiers]
outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Unica Dosis')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
if k == 5:
name = '../output/producto78/vacunados_edad_fecha' + '_Refuerzo.csv'
df.to_csv(name, index=False)
dft = df.T
dft.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Edad']
variables = [x for x in df.columns if x not in identifiers]
outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Dosis Refuerzo')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
if k == 6:
name = '../output/producto78/vacunados_edad_fecha' + '_Cuarta.csv'
df.to_csv(name, index=False)
dft = df.T
dft.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Edad']
variables = [x for x in df.columns if x not in identifiers]
outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Cuarta Dosis')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
if k == 7:
name = '../output/producto78/vacunados_edad_fecha' + '_total.csv'
df.to_csv(name, index=False)
dft = df.T
dft.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Edad']
variables = [x for x in df.columns if x not in identifiers]
outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total vacunados')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
elif self.indicador == 'vacunas_prioridad':
self.last_added.rename(columns={'CRITERIO': 'Grupo',
'SUB_CRITERIO': 'Subgrupo',
'1aDOSIS1': 'Primera',
'2aDOSIS1': 'Segunda'}, inplace=True)
self.last_added.sort_values(by=['Grupo', 'Subgrupo'], inplace=True)
self.last_added = self.last_added[['Grupo', 'Subgrupo', 'Primera', 'Segunda']]
self.last_added['Primera'] = self.last_added.groupby(['Grupo', 'Subgrupo'])['Primera'].transform('sum')
self.last_added['Segunda'] = self.last_added.groupby(['Grupo', 'Subgrupo'])['Segunda'].transform('sum')
self.last_added = self.last_added[['Grupo', 'Subgrupo', 'Primera', 'Segunda']]
self.last_added.drop_duplicates(inplace=True)
##transformar en input
df = pd.DataFrame()
grupos = pd.DataFrame(self.last_added['Grupo'].unique())
for grupo in grupos[0]:
df_grupo = self.last_added.loc[self.last_added['Grupo'] == grupo]
df_grupo.set_index('Subgrupo', inplace=True)
df_grupo = df_grupo[['Primera', 'Segunda']].T
df_grupo.reset_index(drop=True, inplace=True)
df = df.append(df_grupo, ignore_index=True)
new_col = ['Primera', 'Segunda', 'Primera', 'Segunda', 'Primera', 'Segunda', 'Primera', 'Segunda',
'Primera', 'Segunda', 'Primera', 'Segunda']
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for grupo in grupos[0]:
col = [grupo, grupo]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Grupo', value=new_col)
self.last_added = df
identifiers = ['Grupo', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Subgrupo'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json',orient='values',force_ascii=False)
elif self.indicador == 'vacunas_comuna':
##template por comuna
df_base = pd.read_csv('../input/DistribucionDEIS/baseFiles/DEIS_template.csv')
df_base['Codigo region'] = df_base['Codigo region'].fillna(0)
df_base['Codigo comuna'] = df_base['Codigo comuna'].fillna(0)
df_base['Comuna'] = df_base['Comuna'].fillna(0)
todrop = df_base.loc[df_base['Comuna'] == 0]
df_base.drop(todrop.index, inplace=True)
df_base['Codigo region'] = df_base['Codigo region'].astype(int)
df_base['Codigo comuna'] = df_base['Codigo comuna'].astype(int)
desconocido = df_base['Codigo comuna'] != 0
df_base['Codigo comuna'].where(desconocido, '', inplace=True)
Comp = df_base.loc[df_base['Comuna'] != 'Total']
Comp.reset_index(inplace=True)
utils.desconocidoName(Comp)
# for k in range(len(Comp)):
# if Comp.loc[k, 'Codigo region'] < 10:
# Comp.loc[k, 'Codigo region'] = '0' + str(Comp.loc[k, 'Codigo region'])
# else:
# Comp.loc[k, 'Codigo region'] = str(Comp.loc[k, 'Codigo region'])
#
# if Comp.loc[k, 'Codigo comuna'] != '':
# if Comp.loc[k, 'Codigo comuna'] < 10000:
# Comp.loc[k, 'Codigo comuna'] = '0' + str(Comp.loc[k, 'Codigo comuna'])
# else:
# Comp.loc[k, 'Codigo comuna'] = str(Comp.loc[k, 'Codigo comuna'])
comuna = Comp['Comuna']
self.last_added.rename(columns={'REGION_CORTO': 'region_residencia',
'COD_COMUNA_FINAL': 'Codigo comuna',
'FECHA_INMUNIZACION': 'Fecha',
'SUM_of_SUM_of_2aDOSIS': 'Segunda_comuna',
'SUM_of_SUM_of_1aDOSIS': 'Primera_comuna',
'SUM_of_SUM_of_ÚnicaDOSIS':'Unica_comuna',
'SUM_of_Refuerzo_DOSIS':'Refuerzo_comuna',
'SUM_of_4_Dosis':'Cuarta_comuna'}, inplace=True)
self.last_added = self.last_added.dropna(subset=['Fecha'])
self.last_added['Fecha'] = pd.to_datetime(self.last_added['Fecha'],format='%d/%m/%Y').dt.strftime("%Y-%m-%d")
self.last_added.sort_values(by=['region_residencia','Fecha'], inplace=True)
self.last_added.reset_index(drop=True, inplace=True)
utils.regionDEISName(self.last_added)
# for k in self.last_added.loc[self.last_added['Codigo comuna'] < 10000].index:
# self.last_added.loc[k, 'Codigo comuna'] = '0' + str(self.last_added.loc[k, 'Codigo comuna'])
df_sup = Comp[['Codigo comuna', 'Comuna']]
df_sup['Codigo comuna'] = df_sup['Codigo comuna'].replace('', 0)
self.last_added = self.last_added.merge(df_sup, on="Codigo comuna", how="left")
self.last_added.set_index('Comuna', inplace=True)
columns_name = self.last_added.columns.values
maxSE = self.last_added[columns_name[2]].max()
minSE = self.last_added[columns_name[2]].min()
#print(minSE, maxSE)
lenSE = (pd.to_datetime(maxSE) - pd.to_datetime(minSE)).days + 1
startdate = pd.to_datetime(minSE)
date_list = pd.date_range(startdate, periods=lenSE).tolist()
date_list = [dt.datetime.strftime(x, "%Y-%m-%d") for x in date_list]
#print(date_list)
SE_comuna = self.last_added[columns_name[2]]
def edad2rango(df, comuna):
cols = df.columns.tolist()
df2 = pd.DataFrame(columns=cols)
p = 0
for row in comuna:
aux = df.loc[df.index == row]
aux2 = aux.groupby(['Fecha']).sum()
aux2['Comuna'] = row
aux2.set_index(['Comuna'], inplace=True)
identifiers = ['region_residencia', 'Codigo comuna', 'Fecha']
temp = aux[identifiers].copy()
temp.drop_duplicates(keep='first', inplace=True)
temp2 = pd.concat([temp, aux2], axis=1)
if p == 0:
df2 = temp2
p += 1
else:
df2 = pd.concat([df2, temp2], axis=0)
return df2
dfv = edad2rango(self.last_added, comuna)
for k in [3,4,5,6,7]:
df = pd.DataFrame(np.zeros((len(comuna), lenSE)))
dicts = {}
keys = range(lenSE)
# values = [i for i in range(lenSE)]
for i in keys:
dicts[i] = date_list[i]
df.rename(columns=dicts, inplace=True)
value_comuna = dfv[columns_name[k]]
value_comuna.fillna(0,inplace=True)
SE_comuna = dfv['Fecha'].copy()
i=0
for row in dfv.index:
idx = comuna.loc[comuna == row].index.values
if idx.size > 0:
col = SE_comuna[i]
df[col][idx] = value_comuna[i].astype(int)
i += 1
df_output = pd.concat([Comp, df], axis=1)
df_output.drop(columns=['index'], axis=1, inplace=True)
nComunas = [len(list(group)) for key, group in groupby(df_output['Codigo region'])]
identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna']
variables = [x for x in df_output.columns if x not in identifiers]
begRow = 0
for i in range(len(nComunas)):
endRow = begRow + nComunas[i]
firstList = df_output[identifiers].iloc[endRow - 1].values.tolist()
firstList[2] = 'Total'
firstList[3] = ''
valuesTotal = df_output[variables][begRow:endRow].sum(axis=0).tolist()
regionTotal = pd.DataFrame((firstList + valuesTotal), index=df_output.columns.values).transpose()
if i < len(nComunas) - 1:
blank_line = pd.Series(np.empty((len(regionTotal), 0)).tolist())
regionTotal = pd.concat([regionTotal, blank_line], axis=0)
regionTotal.drop(columns=0, axis=1, inplace=True)
temp = pd.concat([df_output.iloc[begRow:endRow], regionTotal], axis=0)
if i == 0:
outputDF2 = temp
else:
outputDF2 = pd.concat([outputDF2, temp], axis=0)
if i < len(nComunas) - 1:
begRow = endRow
outputDF2.reset_index(inplace=True)
outputDF2.drop(columns=['index'], axis=1, inplace=True)
outputDF2[variables] = outputDF2[variables].dropna() # .astype(int)
#print(outputDF2.head(20))
outputDF2.dropna(how='all', inplace=True)
todrop = outputDF2.loc[outputDF2['Comuna'] == 'Total']
outputDF2.drop(todrop.index, inplace=True)
if k == 3:
name = self.output + '_1eraDosis.csv'
outputDF2.to_csv(name, index=False)
outputDF2_T = outputDF2.T
outputDF2_T.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna']
outputDF2.drop(columns=['Poblacion'],inplace=True)
variables = [x for x in outputDF2.columns if x not in identifiers]
outputDF2_std = pd.melt(outputDF2, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Primera Dosis')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
elif k == 4:
name = self.output +'_2daDosis.csv'
outputDF2.to_csv(name, index=False)
outputDF2_T = outputDF2.T
outputDF2_T.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna']
outputDF2.drop(columns=['Poblacion'], inplace=True)
variables = [x for x in outputDF2.columns if x not in identifiers]
outputDF2_std = pd.melt(outputDF2, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Segunda Dosis')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
elif k == 5:
name = self.output +'_UnicaDosis.csv'
outputDF2.to_csv(name, index=False)
outputDF2_T = outputDF2.T
outputDF2_T.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna']
outputDF2.drop(columns=['Poblacion'], inplace=True)
variables = [x for x in outputDF2.columns if x not in identifiers]
outputDF2_std = pd.melt(outputDF2, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Unica Dosis')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
elif k == 6:
name = self.output +'_Refuerzo.csv'
outputDF2.to_csv(name, index=False)
outputDF2_T = outputDF2.T
outputDF2_T.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna']
outputDF2.drop(columns=['Poblacion'], inplace=True)
variables = [x for x in outputDF2.columns if x not in identifiers]
outputDF2_std = pd.melt(outputDF2, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Dosis Refuerzo')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
elif k == 7:
name = self.output + '_4taDosis.csv'
outputDF2.to_csv(name, index=False)
outputDF2_T = outputDF2.T
outputDF2_T.to_csv(name.replace('.csv', '_T.csv'), header=False)
identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna']
outputDF2.drop(columns=['Poblacion'], inplace=True)
variables = [x for x in outputDF2.columns if x not in identifiers]
outputDF2_std = pd.melt(outputDF2, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='<NAME>')
outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False)
elif self.indicador == 'vacunas_comuna_edad':
##template por comuna
df_base = pd.read_csv('../input/DistribucionDEIS/baseFiles/DEIS_template.csv')
df_base['Codigo region'] = df_base['Codigo region'].fillna(0)
df_base['Codigo comuna'] = df_base['Codigo comuna'].fillna(0)
df_base['Comuna'] = df_base['Comuna'].fillna(0)
todrop = df_base.loc[df_base['Comuna'] == 0]
df_base.drop(todrop.index, inplace=True)
df_base['Codigo region'] = df_base['Codigo region'].astype(int)
df_base['Codigo comuna'] = df_base['Codigo comuna'].astype(int)
desconocido = df_base['Codigo comuna'] != 0
df_base['Codigo comuna'].where(desconocido, '', inplace=True)
Comp = df_base.loc[df_base['Comuna'] != 'Total']
Comp.reset_index(inplace=True)
utils.desconocidoName(Comp)
for k in range(len(Comp)):
if Comp.loc[k, 'Codigo region'] < 10:
Comp.loc[k, 'Codigo region'] = '0' + str(Comp.loc[k, 'Codigo region'])
else:
Comp.loc[k, 'Codigo region'] = str(Comp.loc[k, 'Codigo region'])
if Comp.loc[k, 'Codigo comuna'] != '':
if Comp.loc[k, 'Codigo comuna'] < 10000:
Comp.loc[k, 'Codigo comuna'] = '0' + str(Comp.loc[k, 'Codigo comuna'])
else:
Comp.loc[k, 'Codigo comuna'] = str(Comp.loc[k, 'Codigo comuna'])
comuna = Comp['Comuna']
self.last_added.rename(columns={'NOMBRE_REGION': 'region_residencia',
'COD_COMUNA': 'Codigo comuna',
'EDAD_ANOS': 'Edad',
'2aDOSIS_RES': 'Segunda_comuna',
'1aDOSIS_RES': 'Primera_comuna',
'ÚnicaDOSIS':'Unica_comuna',
'4aDOSIS': 'Cuarta_comuna',
'Refuerzo_DOSIS':'Refuerzo_comuna'}, inplace=True)
utils.regionDEISName(self.last_added)
self.last_added = self.last_added[['region_residencia','Codigo comuna','Edad','Primera_comuna','Segunda_comuna','Unica_comuna','Refuerzo_comuna','Cuarta_comuna']]
for k in range(len(self.last_added)):
if self.last_added.loc[k, 'Codigo comuna'] != '':
if self.last_added.loc[k, 'Codigo comuna'] < 10000:
self.last_added.loc[k, 'Codigo comuna'] = '0' + str(self.last_added.loc[k, 'Codigo comuna'])
else:
self.last_added.loc[k, 'Codigo comuna'] = str(self.last_added.loc[k, 'Codigo comuna'])
df_sup = Comp[['Codigo comuna', 'Comuna']]
df_sup['Codigo comuna'] = df_sup['Codigo comuna'].replace('', 0)
self.last_added = self.last_added.merge(df_sup, on="Codigo comuna", how="left")
self.last_added.set_index('Comuna', inplace=True)
columns_name = self.last_added.columns.values
maxSE = self.last_added[columns_name[2]].max()
minSE = self.last_added[columns_name[2]].min()
#print(minSE, maxSE)
lenSE = maxSE - minSE + 1
date_list = list(range(minSE,maxSE+1))
#print(date_list)
SE_comuna = self.last_added[columns_name[2]]
for k in [3,4,5,6,7]:
df = pd.DataFrame(np.zeros((len(comuna), lenSE)))
dicts = {}
keys = range(lenSE)
# values = [i for i in range(lenSE)]
for i in keys:
dicts[i] = date_list[i]
df.rename(columns=dicts, inplace=True)
value_comuna = self.last_added[columns_name[k]]
value_comuna.fillna(0,inplace=True)
i=0
for row in self.last_added.index:
idx = comuna.loc[comuna == row].index.values
if idx.size > 0:
col = SE_comuna[i]
df[col][idx] = value_comuna[i].astype(int)
i += 1
df_output = pd.concat([Comp, df], axis=1)
df_output.drop(columns=['index'], axis=1, inplace=True)
nComunas = [len(list(group)) for key, group in groupby(df_output['Codigo region'])]
identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna']
variables = [x for x in df_output.columns if x not in identifiers]
begRow = 0
for i in range(len(nComunas)):
endRow = begRow + nComunas[i]
firstList = df_output[identifiers].iloc[endRow - 1].values.tolist()
firstList[2] = 'Total'
firstList[3] = ''
valuesTotal = df_output[variables][begRow:endRow].sum(axis=0).tolist()
regionTotal = pd.DataFrame((firstList + valuesTotal), index=df_output.columns.values).transpose()
if i < len(nComunas) - 1:
blank_line = pd.Series(np.empty((len(regionTotal), 0)).tolist())
regionTotal = | pd.concat([regionTotal, blank_line], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 10:34:57 2020
@author: hcji
"""
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.sparse import load_npz
from DeepEI.utils import get_score, get_fp_score
from DeepEI.predict import predict_fingerprint
with open('DeepEI/data/split.json', 'r') as js:
split = json.load(js)
keep = np.array(split['keep'])
nist_smiles = np.array(json.load(open('DeepEI/data/all_smiles.json')))[keep]
nist_masses = np.load('DeepEI/data/molwt.npy')[keep]
nist_spec = load_npz('DeepEI/data/peakvec.npz').todense()[keep,:]
nist_fingerprint = load_npz('DeepEI/data/fingerprints.npz').todense()[keep,:]
neims_nist_spec = load_npz('DeepEI/data/neims_spec_nist.npz').todense()[keep,:]
neims_msbk_smiles = np.array(json.load(open('DeepEI/data/neims_msbk_smiles.json')))
neims_msbk_masses = np.load('DeepEI/data/neims_msbk_masses.npy')
neims_msbk_spec = load_npz('DeepEI/data/neims_spec_msbk.npz').todense()
neims_msbk_cdkfps = load_npz('DeepEI/data/neims_msbk_cdkfps.npz').todense()
msbk_smiles = np.array(json.load(open('DeepEI/data/msbk_smiles.json')))
msbk_masses = np.load('DeepEI/data/msbk_masses.npy')
msbk_spec = load_npz('DeepEI/data/msbk_spec.npz').todense()
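# keep only the fingerprint bits whose MLP classifier reaches F1 > 0.5, then
# predict those bits from the MassBank query spectra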
mlp = pd.read_csv('Fingerprint/results/mlp_result.txt', sep='\t', header=None)
mlp.columns = ['id', 'accuracy', 'precision', 'recall', 'f1']
fpkeep = mlp['id'][np.where(mlp['f1'] > 0.5)[0]]
pred_fps = predict_fingerprint(msbk_spec, fpkeep)
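# candidate library: NIST compounds plus the NEIMS-predicted MassBank-only
# compounds; fingerprints are restricted to the retained bits (fpkeep)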
db_smiles = np.array(list(nist_smiles) + list(neims_msbk_smiles))
db_masses = np.append(nist_masses, neims_msbk_masses)
db_spec = np.append(neims_nist_spec, neims_msbk_spec, axis=0)
db_fingerprints = np.append(nist_fingerprint, neims_msbk_cdkfps, axis=0)[:, fpkeep]
if __name__ == '__main__':
output = | pd.DataFrame(columns=['smiles', 'mass', 'score', 'rank', 'inNIST']) | pandas.DataFrame |
import os
os.chdir('osmFISH_Ziesel/')
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('qt5agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import scipy.stats as st
from matplotlib.lines import Line2D
import pickle
with open ('data/SpaGE_pkl/osmFISH_Cortex.pkl', 'rb') as f:
datadict = pickle.load(f)
osmFISH_data = datadict['osmFISH_data']
del datadict
Gene_Order = osmFISH_data.columns
### SpaGE
SpaGE_imputed_Ziesel = pd.read_csv('Results/SpaGE_LeaveOneOut.csv',header=0,index_col=0,sep=',')
SpaGE_imputed_Ziesel = SpaGE_imputed_Ziesel.loc[:,Gene_Order]
SpaGE_Corr_Ziesel = pd.Series(index=Gene_Order, dtype=float)
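# Spearman correlation between measured and SpaGE-imputed expression, per gene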
for i in Gene_Order:
SpaGE_Corr_Ziesel[i] = st.spearmanr(osmFISH_data[i],SpaGE_imputed_Ziesel[i])[0]
### gimVI
gimVI_imputed_Ziesel = | pd.read_csv('Results/gimVI_LeaveOneOut.csv',header=0,index_col=0,sep=',') | pandas.read_csv |
import google_streetview.api
from google_streetview import helpers
import pandas as pd
import numpy as np
import os
import random
import imageio
import math
import h5py
# define parameters
earth_radius = 6371  # km (mean Earth radius)
grid_size = 6720 # m
fov = 120
res = 224
random.seed(42)
def getGridSample(lat, lon, n):
"""
    Get a random sample of n locations within +/- grid_size metres of (lat, lon)
    param: lat latitude of the grid center point
    param: lon longitude of the grid center point
    param: n number of locations to sample
    return: list of n (latitude, longitude) tuples
"""
locations = []
for i in range(n):
# Get a random point in km's
min_delta = -1 * grid_size
max_delta = grid_size
delta_x_kms = (np.random.rand()*(max_delta - min_delta) + min_delta)/1000 # kms
delta_y_kms = (np.random.rand()*(max_delta - min_delta) + min_delta)/1000 # kms
# Convert from km's to latitude/longitude
delta_lat = (delta_x_kms/earth_radius) * 180/math.pi
r = earth_radius*math.cos(lat*math.pi/180.0)
delta_lon = (delta_y_kms/r)*180/math.pi
# Get the new lat/lon point
new_lat = lat + delta_lat
new_lon = lon + delta_lon
locations.append((new_lat, new_lon))
return locations
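# Worked example (a hedged sketch, not from the original source): with
# earth_radius = 6371 km, a 1 km northward offset is
#   delta_lat = (1 / 6371) * 180 / pi ~= 0.0090 degrees,
# and at latitude 40 degrees a 1 km eastward offset is
#   delta_lon = (1 / (6371 * cos(40 * pi / 180))) * 180 / pi ~= 0.0117 degrees,
# so e.g. getGridSample(40.0, -105.0, 5) returns five (lat, lon) tuples inside
# the +/- grid_size box around (40.0, -105.0).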
def downloadImage_v2(params_no_key, name, directory, api_pos):
"""
    params_no_key: dictionary of parameters to pass to the google streetview API, without the API key
    name: name of the image (string)
    directory: path where we save the images
    api_pos: index of the API key to use
    return: (image as a numpy array or None, status 1 on success / 0 on failure,
             None on success or the request metadata on error)
"""
#adding the relevant api key before running the code
params_no_key['key']= api['key'][api_pos]
#computing the image
results = google_streetview.api.results([params_no_key])
#downloading the image if it exists
if results.metadata[0]['status'] == 'OK':
#using helper function from original code to download the image
helpers.download(results.links[0], directory+"/"+name + '.jpg')
api['uses'][api_pos]+=1 #counting the api key use if we have success
#now returning numpy image and status 1
return np.array(imageio.imread(directory+"/"+name + '.jpg')), 1 , None
else:
#if there is an error, we return no image and status 0
return None, 0, results.metadata[0]
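# Usage sketch (hypothetical parameter values): params_no_key follows the
# google_streetview request format, minus the 'key' entry added inside, e.g.
#   params = {'size': '224x224', 'location': '40.0,-105.0',
#             'heading': '0', 'pitch': '0', 'fov': str(fov)}
#   img, status, meta = downloadImage_v2(params, 'img_0', 'images', api_pos)
# status is 1 when the panorama exists and the jpg was written (img is the
# array, meta is None); otherwise img is None and meta holds the request metadata.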
#we import the dataset with the APIs
api=pd.read_pickle('api.pkl')
#we see which API key we are using to start running the code
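# limit_uses: per-key request budget; the 500/7*1000 figure appears to assume a
# $500 credit at roughly $7 per 1000 Street View Static API requests (assumed
# interpretation)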
limit_uses=int(500/7*1000)
if api['uses'][0]+10<limit_uses:
api_pos=0
else:
api_pos=1
#arguments and KEY
split = "train"
n = 10
"""
Inputs (file paths are hard-coded in the read_csv calls below):
- a split csv with Unique IDs and the split each belongs to
- a lat/lon csv with latitude, longitude, and Unique IDs
param: split name of the split to take images for
param: n number of samples per grid cell
return: numpy array of length num_locations_in_split * n
"""
# Load the Unique IDs for the split
unique_ids = pd.read_csv(r'C:/Users/nsuar/Google Drive/Carbon_emissions/urban_emissions_git/urban_emissions/01_Data/ozone_splits.csv'
, dtype={'Unique_ID': str, 'dataset': str})
unique_ids = unique_ids[unique_ids['dataset'] == split]
# Load the lat/lon coords for each Unique ID
lat_lon = | pd.read_csv(r'C:/Users/nsuar/Google Drive/Carbon_emissions/urban_emissions_git/urban_emissions/01_Data/01_Carbon_emissions/Airnow/World_all_locations_2020_avg_clean.csv' ) | pandas.read_csv |
"""
Class Features
Name: drv_dataset_hmc_io_dynamic_forcing
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Library
import logging
import warnings
import os
import re
import datetime
import numpy as np
import pandas as pd
import xarray as xr
from copy import deepcopy
from hmc.algorithm.io.lib_data_io_generic import swap_darray_dims_xy, create_darray_3d, create_darray_2d, \
write_dset, create_dset
from hmc.algorithm.io.lib_data_zip_gzip import zip_filename
from hmc.algorithm.utils.lib_utils_analysis import compute_domain_mean, \
compute_catchment_mean_serial, compute_catchment_mean_parallel_sync, compute_catchment_mean_parallel_async
from hmc.algorithm.utils.lib_utils_system import split_path, create_folder, copy_file
from hmc.algorithm.utils.lib_utils_string import fill_tags2string
from hmc.algorithm.utils.lib_utils_list import flat_list
from hmc.algorithm.utils.lib_utils_zip import add_zip_extension
from hmc.algorithm.default.lib_default_variables import variable_default_fields as dset_default_base
from hmc.algorithm.default.lib_default_args import logger_name, time_format_algorithm, time_format_datasets
from hmc.driver.dataset.drv_dataset_hmc_io_type import DSetReader, DSetComposer
# Log
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Class to configure datasets
class DSetManager:
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, dset,
terrain_values=None, terrain_geo_x=None, terrain_geo_y=None, terrain_transform=None, terrain_bbox=None,
dset_list_format=None,
dset_list_type=None,
dset_list_group=None,
template_time=None, template_analysis_def=None,
model_tag='hmc', datasets_tag='datasets',
coord_name_geo_x='Longitude', coord_name_geo_y='Latitude', coord_name_time='time',
dim_name_geo_x='west_east', dim_name_geo_y='south_north', dim_name_time='time',
dset_write_engine='netcdf4', dset_write_compression_level=9, dset_write_format='NETCDF4_CLASSIC',
file_compression_mode=False, file_compression_ext='.gz',
**kwargs):
if dset_list_format is None:
dset_list_format = ['Gridded', 'Point', 'TimeSeries']
if dset_list_type is None:
dset_list_type = ['OBS', 'FOR']
if dset_list_group is None:
dset_list_group = ['OBS', 'FOR']
self.dset = dset
self.dset_list_format = dset_list_format
self.dset_list_type = dset_list_type
self.dset_list_group = dset_list_group
self.terrain_values = terrain_values
self.terrain_geo_x = terrain_geo_x
self.terrain_geo_y = terrain_geo_y
self.terrain_tranform = terrain_transform
self.terrain_bbox = terrain_bbox
self.da_terrain = create_darray_2d(self.terrain_values, self.terrain_geo_x, self.terrain_geo_y,
coord_name_x=coord_name_geo_x, coord_name_y=coord_name_geo_y,
dim_name_x=dim_name_geo_x, dim_name_y=dim_name_geo_y,
dims_order=[dim_name_geo_y, dim_name_geo_x])
self.model_tag = model_tag
self.datasets_tag = datasets_tag
self.coord_name_time = coord_name_time
self.coord_name_geo_x = coord_name_geo_x
self.coord_name_geo_y = coord_name_geo_y
self.dim_name_time = dim_name_time
self.dim_name_geo_x = dim_name_geo_x
self.dim_name_geo_y = dim_name_geo_y
self.file_name_tag = 'file_name'
self.folder_name_tag = 'folder_name'
self.var_period_tag = 'var_period'
dset_obj = {}
dset_fx = {}
dset_var_dict = {}
dset_vars_list = []
for dset_format in dset_list_format:
if dset_format in self.dset:
dset_tmp = self.dset[dset_format]
dset_obj[dset_format] = {}
dset_obj[dset_format][model_tag] = {}
dset_obj[dset_format][datasets_tag] = {}
dset_fx[dset_format] = {}
dset_fx[dset_format][datasets_tag] = {}
dset_var_dict[dset_format] = {}
dset_var_dict[dset_format][model_tag] = {}
file_name = dset_tmp['hmc_file_name']
file_folder = dset_tmp['hmc_file_folder']
file_format = dset_tmp['hmc_file_format']
file_frequency = dset_tmp['hmc_file_frequency']
file_vars = dset_tmp['hmc_file_variable']
dset_obj[dset_format][model_tag] = {}
dset_obj[dset_format][model_tag][self.file_name_tag] = file_name
dset_obj[dset_format][model_tag]['folder_name'] = file_folder
                dset_obj[dset_format][model_tag]['frequency'] = file_frequency
                dset_obj[dset_format][model_tag]['format'] = file_format
for dset_type in dset_list_type:
dset_obj[dset_format][datasets_tag][dset_type] = {}
dset_fx[dset_format][datasets_tag][dset_type] = {}
if file_vars[dset_type].__len__() > 0:
var_frequency = file_vars[dset_type]['var_frequency']
var_rounding = file_vars[dset_type]['var_rounding']
var_operation = file_vars[dset_type]['var_operation']
var_period = file_vars[dset_type]['var_period']
var_list = file_vars[dset_type]['var_list']
dset_fx_list = []
for var_key, var_value in var_list.items():
dset_obj[dset_format][datasets_tag][dset_type][var_key] = {}
dset_obj[dset_format][datasets_tag][dset_type][var_key][self.file_name_tag] = \
var_value['var_file_name']
dset_obj[dset_format][datasets_tag][dset_type][var_key][self.folder_name_tag] = \
var_value['var_file_folder']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_dset'] = \
var_value['var_file_dset']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_format'] = \
var_value['var_file_format']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_limits'] = \
var_value['var_file_limits']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_units'] = \
var_value['var_file_units']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_frequency'] = var_frequency
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_rounding'] = var_rounding
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_operation'] = var_operation
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_period'] = var_period
if not dset_var_dict[dset_format][model_tag]:
dset_var_dict[dset_format][model_tag] = [var_value['var_file_dset']]
else:
value_list_tmp = dset_var_dict[dset_format][model_tag]
value_list_tmp.append(var_value['var_file_dset'])
idx_list_tmp = sorted([value_list_tmp.index(elem) for elem in set(value_list_tmp)])
value_list_filter = [value_list_tmp[idx_tmp] for idx_tmp in idx_list_tmp]
dset_var_dict[dset_format][model_tag] = value_list_filter
dset_vars_list.append(var_key)
dset_fx_list.append(var_operation)
for var_fx_step in dset_fx_list:
for var_fx_key_step, var_fx_flag_step in var_fx_step.items():
if var_fx_key_step not in list(dset_fx[dset_format][datasets_tag][dset_type].keys()):
dset_fx[dset_format][datasets_tag][dset_type][var_fx_key_step] = var_fx_flag_step
else:
var_fx_flag_tmp = dset_fx[dset_format][datasets_tag][dset_type][var_fx_key_step]
if var_fx_flag_tmp != var_fx_flag_step:
log_stream.error(' ===> Variable(s) operation is defined in two different mode!')
raise RuntimeError('Different operations are not allowed for the same group')
else:
dset_obj[dset_format][datasets_tag][dset_type] = None
dset_fx[dset_format][datasets_tag][dset_type] = None
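        # summary of what was parsed above:
        #   dset_obj  - per-format file name/folder templates for the model and datasets variables
        #   dset_fx   - per-format/type variable operation flags (e.g. merge/split/dump/copy/analyze)
        #   dset_vars - unique variable names found across all configured datasets
        #   dset_lut  - model dataset names associated with each format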
self.dset_obj = dset_obj
self.dset_fx = dset_fx
self.dset_vars = list(set(dset_vars_list))
self.dset_lut = dset_var_dict
self.template_time = template_time
self.var_interp = 'nearest'
self.dset_write_engine = dset_write_engine
self.dset_write_compression_level = dset_write_compression_level
self.dset_write_format = dset_write_format
self.file_compression_mode = file_compression_mode
self.file_compression_ext = file_compression_ext
self.terrain_geo_x_llcorner = self.terrain_bbox[0]
self.terrain_geo_y_llcorner = self.terrain_bbox[1]
self.terrain_geo_cellsize = self.terrain_tranform[0]
self.file_attributes_dict = {'ncols': self.da_terrain.shape[1],
'nrows': self.da_terrain.shape[0],
'nodata_value': -9999.0,
'xllcorner': self.terrain_geo_x_llcorner,
'yllcorner': self.terrain_geo_y_llcorner,
'cellsize': self.terrain_geo_cellsize}
self.column_sep = ';'
self.list_sep = ':'
self.template_analysis_def = template_analysis_def
if self.template_analysis_def is not None:
self.list_variable_selected = ['Rain', 'AirTemperature']
self.tag_variable_fields = '{var_name}:hmc_forcing_datasets:{domain_name}'
self.flag_analysis_ts_domain = True
if 'analysis_catchment' in list(self.template_analysis_def.keys()):
self.flag_analysis_ts_catchment = self.template_analysis_def['analysis_catchment']
else:
self.flag_analysis_ts_catchment = False
if 'analysis_mp' in list(self.template_analysis_def.keys()):
self.flag_analysis_ts_catchment_mode = self.template_analysis_def['analysis_mp']
else:
self.flag_analysis_ts_catchment_mode = False
if 'analysis_cpu' in list(self.template_analysis_def.keys()):
self.flag_analysis_ts_catchment_cpu = self.template_analysis_def['analysis_cpu']
else:
self.flag_analysis_ts_catchment_cpu = 1
else:
self.list_variable_selected = ['Rain', 'AirTemperature']
self.tag_variable_fields = '{var_name}:hmc_forcing_datasets:{domain_name}'
self.flag_analysis_ts_domain = True
self.flag_analysis_ts_catchment = False
self.flag_analysis_ts_catchment_mode = False
self.flag_analysis_ts_catchment_cpu = 1
@staticmethod
def validate_flag(data_name, data_flag, flag_key_expected=None, flag_values_expected=None):
if flag_values_expected is None:
flag_values_expected = [None, True, False]
if flag_key_expected is None:
flag_key_expected = ['merge', 'split', 'dump', 'copy', 'analyze']
if data_flag is not None:
for flag_key, flag_value in data_flag.items():
if flag_key not in flag_key_expected:
log_stream.error(' ===> Datasets flag key "' + flag_key + '" is not allowed.')
raise KeyError('Flag key is not in the list of authorized flag keys')
if flag_value not in flag_values_expected:
log_stream.error(' ===> Datasets flag value "' + str(flag_value) + '" is not allowed.')
raise KeyError('Flag value is not in the list of authorized flag values')
if 'copy' in list(data_flag.keys()) and 'dump' in list(data_flag.keys()):
if data_flag['copy'] and data_flag['dump']:
log_stream.error(' ===> Flags "dump" and "copy" cannot be concurrently selected.')
raise RuntimeError('Flags have to be different using the allowed values' + str(flag_values_expected))
# if 'merge' in list(data_flag.keys()) and 'split' in list(data_flag.keys()):
# if data_flag['merge'] and data_flag['split']:
# log_stream.error(' ===> Flags "merge" and "split" cannot be concurrently selected.')
# raise RuntimeError('Flags have to be different using the allowed values' + str(flag_values_expected))
@staticmethod
def rename_filename(file_path_tmpl, file_path_ref):
folder_name_tmpl, file_name_tmpl = os.path.split(file_path_tmpl)
folder_name_ref, file_name_ref = os.path.split(file_path_ref)
        time_match = re.search(r'\d{4}\d{2}\d{2}\d{2}\d{2}', file_name_ref)
time_stamp = pd.Timestamp(datetime.datetime.strptime(time_match.group(), time_format_datasets))
file_name_def = file_name_tmpl.format(dset_datetime_hmc=time_stamp.strftime(time_format_datasets))
file_path_def = os.path.join(folder_name_ref, file_name_def)
return file_path_def
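    # Example (hedged, assuming the template name contains the '{dset_datetime_hmc}'
    # placeholder): with file_path_ref ending in 'rain_202003011200.tiff', the
    # 12-digit timestamp '202003011200' is extracted by the regex above and
    # substituted into the template, and the result is placed in the reference
    # file's folder.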
def copy_data(self, dset_model_dyn, dset_source_dyn, columns_excluded=None, vars_selected=None):
# Starting info
log_stream.info(' -------> Copy data ... ')
if columns_excluded is None:
columns_excluded = ['index', 'File_Type']
var_model_list = list(dset_model_dyn.columns)
var_source_list = list(dset_source_dyn.columns)
var_model_filter = [var for var in var_model_list if var not in columns_excluded]
var_source_filter = [var for var in var_source_list if var not in columns_excluded]
file_dest_list = None
for var_model_step in var_model_filter:
file_model_step = list(dset_model_dyn[var_model_step].values)
for file_name_raw in file_model_step:
if isinstance(file_name_raw, str):
if self.column_sep in file_name_raw:
file_name_model_step = file_name_raw.split(self.column_sep)
else:
file_name_model_step = file_name_raw
if not isinstance(file_name_model_step, list):
file_name_model_step = [file_name_model_step]
if file_dest_list is None:
file_dest_list = [[] for i in range(file_name_model_step.__len__())]
for list_id, file_dest_step in enumerate(file_name_model_step):
if isinstance(file_dest_step, str):
file_dest_list[list_id].append(file_dest_step)
else:
log_stream.warning(' ===> Expected filename is not in string format!')
file_source_list = None
list_id_defined = None
for list_id, var_source_step in enumerate(var_source_filter):
log_stream.info(' --------> Variable ' + var_source_step + ' ... ')
file_source_step = list(dset_source_dyn[var_source_step].values)
file_source_tmp = []
for file_name_raw in file_source_step:
if isinstance(file_name_raw, str):
if self.column_sep in file_name_raw:
file_name_source_step = file_name_raw.split(self.column_sep)
else:
file_name_source_step = file_name_raw
if not isinstance(file_name_source_step, list):
file_name_source_step = [file_name_source_step]
for file_source_step in file_name_source_step:
if isinstance(file_source_step, str):
file_source_tmp.append(file_source_step)
else:
log_stream.warning(' ===> Expected filename is not in string format!')
if file_source_list is None:
file_source_list = []
if not file_source_tmp:
file_source_list.append([]) # condition for empty datasets
# file_source_list = None
else:
file_source_list.append(file_source_tmp)
if list_id_defined is None:
list_id_defined = 0
else:
list_id_defined += 1
if (file_dest_list is not None) and (file_source_list is not None):
if file_dest_list.__len__() > file_source_list.__len__():
file_dest_select = flat_list(file_dest_list)
file_source_select = file_source_list[list_id_defined]
elif file_dest_list.__len__() == file_source_list.__len__():
file_dest_select = file_dest_list[list_id]
file_source_select = file_source_list[list_id]
elif file_dest_list.__len__() < file_source_list.__len__():
file_dest_select = flat_list(file_dest_list)
file_source_select = file_source_list[list_id]
else:
log_stream.error(' ===> Copy failed for unexpected number of destination or source filenames')
raise IOError('Source and destination filenames have to be equal')
if file_dest_select and file_source_select:
warning_message_print = True
for file_path_dest_step, file_path_source_step in zip(file_dest_select, file_source_select):
folder_name_source_step, file_name_source_step = split_path(file_path_source_step)
folder_name_dest_step, file_name_dest_step = split_path(file_path_dest_step)
if os.path.exists(file_path_source_step):
if var_source_step in vars_selected:
if not os.path.exists(file_path_dest_step):
create_folder(folder_name_dest_step)
copy_file(file_path_source_step, file_path_dest_step)
elif var_source_step not in vars_selected:
if warning_message_print:
log_stream.warning(' ===> Variable: ' + var_source_step +
' is not expected for this datasets')
warning_message_print = False
else:
log_stream.warning(' ===> Copy file: ' + file_name_source_step +
' FAILED. File does not exist!')
log_stream.info(' --------> Variable ' + var_source_step + ' ... DONE')
else:
log_stream.warning(' ===> Copy file: ... FAILED. Datasets are undefined')
log_stream.info(' --------> Variable ' + var_source_step + ' ... SKIPPED')
else:
log_stream.warning(' ===> Copy file: ... FAILED. All files do not exist')
log_stream.info(' --------> Variable ' + var_source_step + ' ... SKIPPED')
# Ending info
log_stream.info(' -------> Copy data ... DONE')
def freeze_data(self, dset_expected, dset_def, dset_key_delimiter=':', dset_key_excluded=None):
# Starting info
log_stream.info(' -------> Freeze data ... ')
if dset_key_excluded is None:
dset_key_excluded = ['index', 'File_Type', 'terrain']
if dset_def is not None:
dset_vars_expected = self.dset_vars
dset_check = False
dframe_check = False
if isinstance(dset_def, xr.Dataset):
dset_vars_def = list(dset_def.data_vars)
dset_check = True
elif isinstance(dset_def, pd.DataFrame):
dset_vars_def = list(dset_def.columns)
dframe_check = True
else:
log_stream.error(' ===> Freeze data type is not implemented')
raise NotImplementedError('Data type is unknown for freezing data')
dset_vars_tmp = deepcopy(dset_vars_def)
for dset_var_step in dset_vars_tmp:
if dset_var_step in dset_key_excluded:
dset_vars_def.remove(dset_var_step)
for dset_var_step in dset_vars_def:
if dset_key_delimiter in dset_var_step:
dset_var_root = dset_var_step.split(dset_key_delimiter)[0]
else:
dset_var_root = dset_var_step
if dset_vars_expected[0] == 'ALL':
if dset_check:
if dset_var_step not in ['terrain', 'mask']:
values_nan = np.zeros([dset_expected.index.__len__()])
values_nan[:] = np.nan
dset_expected[dset_var_step] = values_nan
if 'time' in list(dset_def[dset_var_step].dims):
time_array = dset_def[dset_var_step]['time'].values
else:
if 'time' in list(dset_def.dims):
time_array = dset_def['time'].values
else:
log_stream.error(' ===> Freeze time array is not defined for variables')
raise NotImplementedError('Time array is unknown for freezing data')
time_stamp_list = []
for time_step in time_array:
time_stamp = pd.to_datetime(time_step, format='%Y-%m-%d_%H:%M:%S')
time_stamp_list.append(time_stamp)
dset_idx = | pd.DatetimeIndex(time_stamp_list) | pandas.DatetimeIndex |
import wandb
from wandb import data_types
import numpy as np
import pytest
import os
import sys
import datetime
from wandb.sdk.data_types._dtypes import *
class_labels = {1: "tree", 2: "car", 3: "road"}
test_folder = os.path.dirname(os.path.realpath(__file__))
im_path = os.path.join(test_folder, "..", "assets", "test.png")
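# im_path points to a small checked-in test image; it is used below wherever a
# mask is supplied by file path rather than as an array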
def test_none_type():
assert TypeRegistry.type_of(None) == NoneType()
assert TypeRegistry.type_of(None).assign(None) == NoneType()
assert TypeRegistry.type_of(None).assign(1) == InvalidType()
def test_string_type():
assert TypeRegistry.type_of("Hello") == StringType()
assert TypeRegistry.type_of("Hello").assign("World") == StringType()
assert TypeRegistry.type_of("Hello").assign(None) == InvalidType()
assert TypeRegistry.type_of("Hello").assign(1) == InvalidType()
def test_number_type():
assert TypeRegistry.type_of(1.2) == NumberType()
assert TypeRegistry.type_of(1.2).assign(1) == NumberType()
assert TypeRegistry.type_of(1.2).assign(None) == InvalidType()
assert TypeRegistry.type_of(1.2).assign("hi") == InvalidType()
def make_datetime():
return datetime.datetime(2000, 12, 1)
def make_date():
return datetime.date(2000, 12, 1)
def make_datetime64():
return np.datetime64("2000-12-01")
def test_timestamp_type():
assert TypeRegistry.type_of(make_datetime()) == TimestampType()
assert (
TypeRegistry.type_of(make_datetime())
.assign(make_date())
.assign(make_datetime64())
== TimestampType()
)
assert TypeRegistry.type_of(make_datetime()).assign(None) == InvalidType()
assert TypeRegistry.type_of(make_datetime()).assign(1) == InvalidType()
def test_boolean_type():
assert TypeRegistry.type_of(True) == BooleanType()
assert TypeRegistry.type_of(True).assign(False) == BooleanType()
assert TypeRegistry.type_of(True).assign(None) == InvalidType()
assert TypeRegistry.type_of(True).assign(1) == InvalidType()
def test_any_type():
assert AnyType() == AnyType().assign(1)
assert AnyType().assign(None) == InvalidType()
def test_never_type():
assert InvalidType().assign(1) == InvalidType()
assert InvalidType().assign("a") == InvalidType()
assert InvalidType().assign(True) == InvalidType()
assert InvalidType().assign(None) == InvalidType()
def test_unknown_type():
assert UnknownType().assign(1) == NumberType()
assert UnknownType().assign(None) == InvalidType()
def test_union_type():
wb_type = UnionType([float, str])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == InvalidType()
wb_type = UnionType([float, AnyType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == wb_type
wb_type = UnionType([float, UnknownType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == UnionType([float, StringType()])
assert wb_type.assign(None) == InvalidType()
wb_type = UnionType([float, OptionalType(UnknownType())])
assert wb_type.assign(None).assign(True) == UnionType(
[float, OptionalType(BooleanType())]
)
wb_type = UnionType([float, UnionType([str, UnknownType()])])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == UnionType([float, str, bool])
assert wb_type.assign(None) == InvalidType()
def test_const_type():
wb_type = ConstType(1)
assert wb_type.assign(1) == wb_type
assert wb_type.assign("a") == InvalidType()
assert wb_type.assign(2) == InvalidType()
def test_set_const_type():
wb_type = ConstType(set())
assert wb_type.assign(set()) == wb_type
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1}) == InvalidType()
assert wb_type.assign([]) == InvalidType()
wb_type = ConstType({1, 2, 3})
assert wb_type.assign(set()) == InvalidType()
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1, 2, 3}) == wb_type
assert wb_type.assign([1, 2, 3]) == InvalidType()
def test_object_type():
wb_type = TypeRegistry.type_of(np.random.rand(30))
assert wb_type.assign(np.random.rand(30)) == wb_type
assert wb_type.assign(4) == InvalidType()
def test_list_type():
assert ListType(int).assign([]) == ListType(int, 0)
assert ListType(int).assign([1, 2, 3]) == ListType(int, 3)
assert ListType(int).assign([1, "a", 3]) == InvalidType()
def test_dict_type():
spec = {
"number": float,
"nested": {
"list_str": [str],
},
}
exact = {
"number": 1,
"nested": {
"list_str": ["hello", "world"],
},
}
subset = {"nested": {"list_str": ["hi"]}}
narrow = {"number": 1, "string": "hi"}
wb_type = TypeRegistry.type_of(exact)
assert wb_type.assign(exact) == wb_type
assert wb_type.assign(subset) == InvalidType()
assert wb_type.assign(narrow) == InvalidType()
spec = {
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
wb_type = TypedDictType(spec)
assert wb_type.assign({}) == wb_type
assert wb_type.assign({"optional_number": 1}) == wb_type
assert wb_type.assign({"optional_number": "1"}) == InvalidType()
assert wb_type.assign({"optional_unknown": "hi"}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(str),
}
)
assert wb_type.assign({"optional_unknown": None}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
)
wb_type = TypedDictType({"unknown": UnknownType()})
assert wb_type.assign({}) == InvalidType()
assert wb_type.assign({"unknown": None}) == InvalidType()
assert wb_type.assign({"unknown": 1}) == TypedDictType(
{"unknown": float},
)
def test_nested_dict():
notation_type = TypedDictType(
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [
[
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [[]],
}
]
],
}
)
expanded_type = TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(
ListType(
TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(ListType()),
}
)
)
),
}
)
example = {
"a": 1,
"b": True,
"c": "StringType()",
"d": "hi",
"e": {},
"f": [1],
"g": [
[
{
"a": 2,
"b": False,
"c": "StringType()",
"d": 3,
"e": {},
"f": [],
"g": [[5]],
}
]
],
}
real_type = TypedDictType.from_obj(example)
assert notation_type == expanded_type
assert notation_type.assign(example) == real_type
def test_image_type():
wb_type = data_types._ImageFileType()
image_simple = data_types.Image(np.random.rand(10, 10))
wb_type_simple = data_types._ImageFileType.from_obj(image_simple)
image_annotated = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
},
)
wb_type_annotated = data_types._ImageFileType.from_obj(image_annotated)
image_annotated_differently = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth_2": {"path": im_path, "class_labels": class_labels},
},
)
assert wb_type.assign(image_simple) == wb_type_simple
assert wb_type.assign(image_annotated) == wb_type_annotated
# OK to assign Images with disjoint class set
assert wb_type_annotated.assign(image_simple) == wb_type_annotated
# Merge when disjoint
assert wb_type_annotated.assign(
image_annotated_differently
) == data_types._ImageFileType(
box_layers={"box_predictions": {1, 2, 3}, "box_ground_truth": {1, 2, 3}},
box_score_keys={"loss", "acc"},
mask_layers={
"mask_ground_truth_2": set(),
"mask_ground_truth": set(),
"mask_predictions": {1, 2, 3},
},
class_map={"1": "tree", "2": "car", "3": "road"},
)
def test_classes_type():
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
wb_class_type = (
wandb.wandb_sdk.data_types.helper_types.classes._ClassesIdType.from_obj(
wb_classes
)
)
assert wb_class_type.assign(1) == wb_class_type
assert wb_class_type.assign(0) == InvalidType()
def test_table_type():
table_1 = wandb.Table(columns=["col"], data=[[1]])
t1 = data_types._TableType.from_obj(table_1)
table_2 = wandb.Table(columns=["col"], data=[[1.3]])
table_3 = wandb.Table(columns=["col"], data=[["a"]])
assert t1.assign(table_2) == t1
assert t1.assign(table_3) == InvalidType()
def test_table_implicit_types():
table = wandb.Table(columns=["col"])
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
table = wandb.Table(columns=["col"], optional=False)
with pytest.raises(TypeError):
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
def test_table_allow_mixed_types():
table = wandb.Table(columns=["col"], allow_mixed_types=True)
table.add_data(None)
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
table = wandb.Table(columns=["col"], optional=False, allow_mixed_types=True)
with pytest.raises(TypeError):
table.add_data(None) # Still errors since optional is false
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
def test_tables_with_dicts():
good_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
]
bad_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
}
]
],
}
]
}
],
]
table = wandb.Table(columns=["A"], data=good_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=bad_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=good_data)
with pytest.raises(TypeError):
table = wandb.Table(columns=["A"], data=bad_data)
def test_table_explicit_types():
table = wandb.Table(columns=["a", "b"], dtype=int)
table.add_data(None, None)
table.add_data(1, 2)
with pytest.raises(TypeError):
table.add_data(1, "a")
table = wandb.Table(columns=["a", "b"], optional=False, dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
table = wandb.Table(columns=["a", "b"], optional=[False, True], dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
with pytest.raises(TypeError):
table.add_data(None, "a")
table.add_data(1, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
def test_table_type_cast():
table = wandb.Table(columns=["type_col"])
table.add_data(1)
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
table.cast("type_col", wb_classes.get_type())
table.add_data(2)
with pytest.raises(TypeError):
table.add_data(4)
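# Shared annotation fixtures for the image-typed table tests below: bounding
# boxes and segmentation masks keyed by the class ids in class_labels.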
box_annotation = {
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
}
mask_annotation = {
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
}
def test_table_specials():
table = wandb.Table(
columns=["image", "table"],
optional=False,
dtype=[data_types.Image, data_types.Table],
)
with pytest.raises(TypeError):
table.add_data(None, None)
# Infers specific types from first valid row
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
"hello",
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, "True", None]]),
)
# allows further refinement
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
# allows addition
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_nan_non_float():
import pandas as pd
wandb.Table(dataframe=pd.DataFrame(data=[["A"], [np.nan]], columns=["a"]))
def test_table_typing_numpy():
# Pulled from https://numpy.org/devdocs/user/basics.types.html
# Numerics
table = wandb.Table(columns=["A"], dtype=[NumberType])
table.add_data(None)
table.add_data(42)
table.add_data(np.byte(1))
table.add_data(np.short(42))
table.add_data(np.ushort(42))
table.add_data(np.intc(42))
table.add_data(np.uintc(42))
table.add_data(np.int_(42))
table.add_data(np.uint(42))
table.add_data(np.longlong(42))
table.add_data(np.ulonglong(42))
table.add_data(np.half(42))
table.add_data(np.float16(42))
table.add_data(np.single(42))
table.add_data(np.double(42))
table.add_data(np.longdouble(42))
table.add_data(np.csingle(42))
table.add_data(np.cdouble(42))
table.add_data(np.clongdouble(42))
table.add_data(np.int8(42))
table.add_data(np.int16(42))
table.add_data(np.int32(42))
table.add_data(np.int64(42))
table.add_data(np.uint8(42))
table.add_data(np.uint16(42))
table.add_data(np.uint32(42))
table.add_data(np.uint64(42))
table.add_data(np.intp(42))
table.add_data(np.uintp(42))
table.add_data(np.float32(42))
table.add_data(np.float64(42))
table.add_data(np.float_(42))
table.add_data(np.complex64(42))
table.add_data(np.complex128(42))
table.add_data(np.complex_(42))
# Booleans
table = wandb.Table(columns=["A"], dtype=[BooleanType])
table.add_data(None)
table.add_data(True)
table.add_data(False)
table.add_data(np.bool_(True))
# Array of Numerics
table = wandb.Table(columns=["A"], dtype=[[NumberType]])
table.add_data(None)
table.add_data([42])
table.add_data(np.array([1, 0], dtype=np.byte))
table.add_data(np.array([42, 42], dtype=np.short))
table.add_data(np.array([42, 42], dtype=np.ushort))
table.add_data(np.array([42, 42], dtype=np.intc))
table.add_data(np.array([42, 42], dtype=np.uintc))
table.add_data(np.array([42, 42], dtype=np.int_))
table.add_data(np.array([42, 42], dtype=np.uint))
table.add_data(np.array([42, 42], dtype=np.longlong))
table.add_data(np.array([42, 42], dtype=np.ulonglong))
table.add_data(np.array([42, 42], dtype=np.half))
table.add_data(np.array([42, 42], dtype=np.float16))
table.add_data(np.array([42, 42], dtype=np.single))
table.add_data(np.array([42, 42], dtype=np.double))
table.add_data(np.array([42, 42], dtype=np.longdouble))
table.add_data(np.array([42, 42], dtype=np.csingle))
table.add_data(np.array([42, 42], dtype=np.cdouble))
table.add_data(np.array([42, 42], dtype=np.clongdouble))
table.add_data(np.array([42, 42], dtype=np.int8))
table.add_data(np.array([42, 42], dtype=np.int16))
table.add_data(np.array([42, 42], dtype=np.int32))
table.add_data(np.array([42, 42], dtype=np.int64))
table.add_data(np.array([42, 42], dtype=np.uint8))
table.add_data(np.array([42, 42], dtype=np.uint16))
table.add_data(np.array([42, 42], dtype=np.uint32))
table.add_data(np.array([42, 42], dtype=np.uint64))
table.add_data(np.array([42, 42], dtype=np.intp))
table.add_data(np.array([42, 42], dtype=np.uintp))
table.add_data(np.array([42, 42], dtype=np.float32))
table.add_data(np.array([42, 42], dtype=np.float64))
table.add_data(np.array([42, 42], dtype=np.float_))
table.add_data(np.array([42, 42], dtype=np.complex64))
table.add_data(np.array([42, 42], dtype=np.complex128))
table.add_data(np.array([42, 42], dtype=np.complex_))
# Array of Booleans
table = wandb.Table(columns=["A"], dtype=[[BooleanType]])
table.add_data(None)
table.add_data([True])
table.add_data([False])
table.add_data(np.array([True, False], dtype=np.bool_))
# Nested arrays
table = wandb.Table(columns=["A"])
table.add_data([[[[1, 2, 3]]]])
table.add_data(np.array([[[[1, 2, 3]]]]))
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_table_typing_pandas():
import pandas as pd
# TODO: Pandas https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#basics-dtypes
# Numerics
table = wandb.Table(dataframe=pd.DataFrame([[1], [0]]).astype(np.byte))
table.add_data(1)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.short))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ushort))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.intc))
table.add_data(42)
table = wandb.Table(dataframe= | pd.DataFrame([[42], [42]]) | pandas.DataFrame |
import pandas as pd
from msi_recal.passes.transform import Transform
from msi_recal.plot import save_spectrum_image
class Normalize(Transform):
CACHE_FIELDS = [
'intensity',
'ref_vals',
]
def __init__(self, params, intensity='median', ref='tic'):
try:
self.intensity = float(intensity)
except ValueError:
self.intensity = None
self.ref = ref
self.ref_vals = {}
def fit(self, X):
if self.intensity is None:
if self.ref == 'tic':
self.intensity = X.groupby('sp').ints.sum().median()
else:
self.intensity = X.groupby('sp').ints.max().median()
return self
def predict(self, X):
assert self.intensity is not None, 'predict called before fit'
if self.ref == 'tic':
self.ref_vals.update(X.groupby('sp').ints.sum())
else:
self.ref_vals.update(X.groupby('sp').ints.max())
ref_vals_s = | pd.Series(self.ref_vals) | pandas.Series |
import datetime
import os
from concurrent.futures import ProcessPoolExecutor
from math import ceil
import pandas as pd
# In[] Read in the source data
def get_source_data():
    # Path to the source data
DataPath = 'data/'
    # Read the source data files
off_train = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_train.csv'),
parse_dates=['Date_received', 'Date'])
off_train.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received', 'Date']
on_train = pd.read_csv(os.path.join(DataPath, 'ccf_online_stage1_train.csv'), parse_dates=['Date_received', 'Date'])
on_train.columns = ['User_id', 'Merchant_id', 'Action', 'Coupon_id', 'Discount_rate', 'Date_received', 'Date']
off_test = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_test_revised.csv'), parse_dates=['Date_received'])
off_test.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received']
print(off_train.info())
print(off_train.head(5))
return off_train, on_train, off_test
# In[] Special handling of null / NaN values
def null_process_offline(dataset, predict=False):
dataset.Distance.fillna(11, inplace=True)
dataset.Distance = dataset.Distance.astype(int)
dataset.Coupon_id.fillna(0, inplace=True)
dataset.Coupon_id = dataset.Coupon_id.astype(int)
dataset.Date_received.fillna(date_null, inplace=True)
dataset[['discount_rate_x', 'discount_rate_y']] = dataset[dataset.Discount_rate.str.contains(':') == True][
'Discount_rate'].str.split(':', expand=True).astype(int)
dataset['discount_rate'] = 1 - dataset.discount_rate_y / dataset.discount_rate_x
dataset.discount_rate = dataset.discount_rate.fillna(dataset.Discount_rate).astype(float)
if predict:
return dataset
else:
dataset.Date.fillna(date_null, inplace=True)
return dataset
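# Illustrative sketch (not part of the original pipeline) of the "full:off"
# discount conversion performed above, e.g. '150:20' -> 1 - 20/150 ~ 0.867,
# while plain fractional rates pass through unchanged. Toy values only.
def _demo_discount_conversion():
    demo = pd.DataFrame({'Discount_rate': ['150:20', '0.95', '30:5']})
    parts = demo[demo.Discount_rate.str.contains(':')]['Discount_rate'] \
        .str.split(':', expand=True).astype(int)
    demo['discount_rate'] = 1 - parts[1] / parts[0]
    # rows without ':' keep their original fractional rate
    demo.discount_rate = demo.discount_rate.fillna(demo.Discount_rate).astype(float)
    return demo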
def null_process_online(dataset):
dataset.Coupon_id.fillna(0, inplace=True)
# online.Coupon_id = online.Coupon_id.astype(int)
dataset.Date_received.fillna(date_null, inplace=True)
dataset.Date.fillna(date_null, inplace=True)
return dataset
# In[] Build the cross-validation training sets
def data_process(off_train, on_train, off_test):
# train feature split
    # Training set 1: coupons received between 2016-04-16 and 2016-05-15
time_range = ['2016-04-16', '2016-05-15']
dataset1 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])].copy()
dataset1['label'] = 0
dataset1.loc[
(dataset1.Date != date_null) & (dataset1.Date - dataset1.Date_received <= datetime.timedelta(15)), 'label'] = 1
    # Offline features for training set 1: offline records whose coupon-received and purchase dates fall roughly between 2016-01-01 and 2016-04-15
time_range_date_received = ['2016-01-01', '2016-03-31']
time_range_date = ['2016-01-01', '2016-04-15']
feature1_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
    # Online features for training set 1: online records whose coupon-received and purchase dates fall roughly between 2016-01-01 and 2016-04-15 [on_train.date == 'null' to on_train.coupon_id == 0]
feature1_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
    # Training set 2: coupons received between 2016-05-16 and 2016-06-15
time_range = ['2016-05-16', '2016-06-15']
dataset2 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])]
dataset2['label'] = 0
dataset2.loc[
(dataset2.Date != date_null) & (dataset2.Date - dataset2.Date_received <= datetime.timedelta(15)), 'label'] = 1
    # Offline features for training set 2: offline records whose coupon-received and purchase dates fall roughly between 2016-02-01 and 2016-05-15
time_range_date_received = ['2016-02-01', '2016-04-30']
time_range_date = ['2016-02-01', '2016-05-15']
feature2_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
    # Online features for training set 2: online records whose coupon-received and purchase dates fall roughly between 2016-02-01 and 2016-05-15
feature2_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
    # Test set
dataset3 = off_test
    # Offline features for the test set: offline records whose coupon-received and purchase dates fall between 2016-03-16 and 2016-06-30
time_range = ['2016-03-16', '2016-06-30']
feature3_off = off_train[((off_train.Date >= time_range[0]) & (off_train.Date <= time_range[1])) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range[0]) & (
off_train.Date_received <= time_range[1]))]
    # Online features for the test set: online records whose coupon-received and purchase dates fall between 2016-03-16 and 2016-06-30
feature3_on = on_train[((on_train.Date >= time_range[0]) & (on_train.Date <= time_range[1])) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range[0]) & (
on_train.Date_received <= time_range[1]))]
# get train feature
ProcessDataSet1 = get_features(dataset1, feature1_off, feature1_on)
ProcessDataSet2 = get_features(dataset2, feature2_off, feature2_on)
ProcessDataSet3 = get_features(dataset3, feature3_off, feature3_on)
return ProcessDataSet1, ProcessDataSet2, ProcessDataSet3
def get_features(dataset, feature_off, feature_on):
dataset = get_offline_features(dataset, feature_off)
return get_online_features(feature_on, dataset)
# In[] Define the feature-extraction functions
def get_offline_features(X, offline):
# X = X[:1000]
print(len(X), len(X.columns))
temp = offline[offline.Coupon_id != 0]
coupon_consume = temp[temp.Date != date_null]
coupon_no_consume = temp[temp.Date == date_null]
user_coupon_consume = coupon_consume.groupby('User_id')
X['weekday'] = X.Date_received.dt.weekday
X['day'] = X.Date_received.dt.day
    # # Number of coupon redemptions at each distance
# temp = coupon_consume.groupby('Distance').size().reset_index(name='distance_0')
# X = pd.merge(X, temp, how='left', on='Distance')
#
    # # Number of unused coupons at each distance
# temp = coupon_no_consume.groupby('Distance').size().reset_index(name='distance_1')
# X = pd.merge(X, temp, how='left', on='Distance')
#
    # # Number of coupons received at each distance
# X['distance_2'] = X.distance_0 + X.distance_1
#
    # # Coupon redemption rate at each distance
# X['distance_3'] = X.distance_0 / X.distance_2
# temp = coupon_consume[coupon_consume.Distance != 11].groupby('Distance').size()
# temp['d4'] = temp.Distance.sum() / len(temp)
# X = pd.merge(X, temp, how='left', on='Distance')
'''user features'''
    # Number of purchases made with a coupon (u2)
temp = user_coupon_consume.size().reset_index(name='u2')
X = pd.merge(X, temp, how='left', on='User_id')
# X.u2.fillna(0, inplace=True)
# X.u2 = X.u2.astype(int)
    # Number of coupons received but never used (u3)
temp = coupon_no_consume.groupby('User_id').size().reset_index(name='u3')
X = pd.merge(X, temp, how='left', on='User_id')
    # Ratio of coupons used to coupons left unused (u19)
X['u19'] = X.u2 / X.u3
    # Total number of coupons received (u1)
X['u1'] = X.u2.fillna(0) + X.u3.fillna(0)
    # Coupon redemption rate (u4)
X['u4'] = X.u2 / X.u1
    # Number of purchases made without a coupon (u5)
temp = offline[(offline.Coupon_id == 0) & (offline.Date != date_null)]
temp1 = temp.groupby('User_id').size().reset_index(name='u5')
X = pd.merge(X, temp1, how='left', on='User_id')
    # Total number of purchases (u25)
X['u25'] = X.u2 + X.u5
    # Share of the user's purchases made with a coupon (u20)
X['u20'] = X.u2 / X.u25
    # Average interval (days) between ordinary purchases (u6)
temp = pd.merge(temp, temp.groupby('User_id').Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u6'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u6']], how='left', on='User_id')
    # Average interval (days) between coupon purchases (u7)
temp = pd.merge(coupon_consume, user_coupon_consume.Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u7'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u7']], how='left', on='User_id')
    # Average ordinary-purchase frequency relative to a 15-day window (u8)
X['u8'] = X.u6 / 15
    # Average coupon-purchase frequency relative to a 15-day window (u9)
X['u9'] = X.u7 / 15
    # Average number of days between receiving and redeeming a coupon (u10)
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = (temp.groupby('User_id').days.sum() / temp.groupby('User_id').size()).reset_index(name='u10')
X = pd.merge(X, temp, how='left', on='User_id')
    # Redemption delay as a fraction of the 15-day window (u11)
X['u11'] = X.u10 / 15
    # Number of coupons redeemed within 15 days of receipt (u21)
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = temp[temp.days <= 15]
temp = temp.groupby('User_id').size().reset_index(name='u21')
X = pd.merge(X, temp, how='left', on='User_id')
    # Coupons redeemed within 15 days divided by all coupon redemptions (u22)
X['u22'] = X.u21 / X.u2
    # Coupons redeemed within 15 days divided by coupons received but unused (u23)
X['u23'] = X.u21 / X.u3
    # Coupons redeemed within 15 days divided by all coupons received (u24)
X['u24'] = X.u21 / X.u1
    # Average discount rate of redeemed coupons (u45)
temp = user_coupon_consume.discount_rate.mean().reset_index(name='u45')
X = pd.merge(X, temp, how='left', on='User_id')
    # Minimum discount rate among the user's redeemed coupons (u27)
temp = user_coupon_consume.discount_rate.min().reset_index(name='u27')
X = pd.merge(X, temp, how='left', on='User_id')
    # Maximum discount rate among the user's redeemed coupons (u28)
temp = user_coupon_consume.discount_rate.max().reset_index(name='u28')
X = pd.merge(X, temp, how='left', on='User_id')
    # Number of distinct coupons the user has redeemed (u32)
temp = coupon_consume.groupby(['User_id', 'Coupon_id']).size()
temp = temp.groupby('User_id').size().reset_index(name='u32')
X = pd.merge(X, temp, how='left', on='User_id')
    # Number of distinct coupons the user has received (u47)
temp = offline[offline.Date_received != date_null]
temp = temp.groupby(['User_id', 'Coupon_id']).size().reset_index(name='u47')
X = pd.merge(X, temp, how='left', on=['User_id', 'Coupon_id'])
    # Share of distinct received coupons that were redeemed (u33)
X['u33'] = X.u32 / X.u47
    # Average number of redemptions per distinct coupon (u34)
X['u34'] = X.u2 / X.u47
    # Average user-merchant distance for coupon redemptions (u35)
temp = offline[(offline.Coupon_id != 0) & (offline.Date != date_null) & (offline.Distance != 11)]
temp = temp.groupby('User_id').Distance
temp = pd.merge(temp.count().reset_index(name='x'), temp.sum().reset_index(name='y'), on='User_id')
temp['u35'] = temp.y / temp.x
temp = temp[['User_id', 'u35']]
X = pd.merge(X, temp, how='left', on='User_id')
    # Minimum user-merchant distance among coupon redemptions (u36)
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('User_id').Distance.min().reset_index(name='u36')
X = | pd.merge(X, temp, how='left', on='User_id') | pandas.merge |
import pandas as pd
from rake_nltk import Rake
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
pd.set_option('display.max_columns', 100)
df = pd.read_csv('movie_metadata.csv')
print(df.head())
print(df.shape)
list(df.columns.values)
df = df[['director_name', 'actor_1_name', 'actor_2_name', 'actor_3_name', 'plot_keywords', 'genres', 'movie_title']]
if not df['actor_1_name'].empty or not df['actor_2_name'].empty or not df['actor_3_name'].empty:
df['actors'] = df['actor_1_name'] + "," + df['actor_2_name'] + "," + df['actor_3_name']
df = df[['director_name', 'plot_keywords', 'genres', 'movie_title', 'actors']]
df.dropna()  # NOTE: the result is not assigned back, so this call does not modify df
print(df.head())
df1 = df.where(( | pd.notnull(df) | pandas.notnull |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 2 14:24:25 2017
@author: ajaver
"""
from tierpsy.features.tierpsy_features.helper import get_n_worms_estimate, \
get_delta_in_frames, add_derivatives
from tierpsy.features.tierpsy_features.events import get_event_stats, event_region_labels, event_columns
from tierpsy.features.tierpsy_features.path import get_path_extent_stats
from tierpsy.features.tierpsy_features.features import timeseries_feats_columns, \
ventral_signed_columns, path_curvature_columns, curvature_columns
import pandas as pd
import numpy as np
index_colums = ['worm_index', 'timestamp']
blob_feats_columns = ['blob_area',
'blob_perimeter',
'blob_box_length',
'blob_box_width',
'blob_quirkiness',
'blob_compactness',
'blob_solidity',
'blob_hu0',
'blob_hu1',
'blob_hu2',
'blob_hu3',
'blob_hu4',
'blob_hu5',
'blob_hu6'
]
#get the ratios to be normalized
feats2normalize = {
'L' : [
'head_tail_distance',
'major_axis',
'minor_axis',
'dist_from_food_edge',
'length',
'width_head_base',
'width_midbody',
'width_tail_base'
],
'1/L' : path_curvature_columns + curvature_columns,
'L^2' : ['area']
}
feats2normalize['L'] += [x for x in timeseries_feats_columns if 'radial_velocity' in x]
feats2normalize['L'] += [x for x in timeseries_feats_columns if 'speed' in x]
#add derivatives and make sure there are not duplicates
for k,dat in feats2normalize.items():
dfeats = ['d_' + x for x in dat if not x.startswith('d_')]
feats2normalize[k] = list(set(dat) ^ set(dfeats))
def _normalize_by_w_length(timeseries_data, feats2norm):
'''
    Normalize features by body length. This is far from the most efficient solution, but it is the easiest to implement.
'''
def _get_conversion_vec(units_t, median_length_vec):
'''helper function to find how to make the conversion'''
if units_t == 'L':
conversion_vec = 1/median_length_vec
elif units_t == '1/L':
conversion_vec = median_length_vec
elif units_t == 'L^2':
conversion_vec = 1/median_length_vec**2
return conversion_vec
timeseries_data = timeseries_data.copy()
median_length = timeseries_data.groupby('worm_index').agg({'length':'median'})
median_length_vec = timeseries_data['worm_index'].map(median_length['length'])
changed_feats_l = []
for units_t, feats in feats2norm.items():
feats_f = [x for x in timeseries_data if any(x.startswith(f) for f in feats)]
conversion_vec = _get_conversion_vec(units_t, median_length_vec)
for f in feats_f:
timeseries_data[f] *= conversion_vec
changed_feats_l += feats_f
changed_feats = {x: x + '_norm' for x in changed_feats_l}
timeseries_data = timeseries_data.rename(columns = changed_feats)
return timeseries_data, changed_feats
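# Minimal sketch of the normalisation above on a toy timeseries (illustrative
# only; 'speed' is treated as an 'L'-unit feature, so it is divided by the
# worm's median length and renamed 'speed_norm').
def _demo_normalize_by_length():
    toy = pd.DataFrame({'worm_index': [1, 1, 1],
                        'length': [1000., 1010., 990.],
                        'speed': [250., 300., 275.]})
    normed, renamed = _normalize_by_w_length(toy, feats2norm={'L': ['speed']})
    # renamed == {'speed': 'speed_norm'}; values are now in body lengths per unit time
    return normed, renamed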
def get_df_quantiles(df,
feats2check = timeseries_feats_columns,
subdivision_dict = {'food_region':['orientation_food_edge']},
feats2norm = feats2normalize,
feats2abs = ventral_signed_columns,
is_remove_subdivided = True,
is_abs_ventral = True,
is_normalize = False
):
'''
Get quantile statistics for all the features given by `feats2check`.
In the features in `feats2abs` we are going to use only the absolute. This is to
deal with worms with unknown dorsal/ventral orientation.
'''
if not feats2check:
return None
q_vals = (0.1, 0.5, 0.9) #percentiles to calculate
iqr_limits = (0.25, 0.75) # range of percentiles used for the interquantile distance
valid_q = q_vals + iqr_limits
    df = df.copy()  # copy so the df can be modified directly without long-lasting side effects
#filter features to be abs
def _filter_ventral_features(feats2check):#%%
valid_f = [x for x in feats2check if any(x.startswith(f) for f in feats2abs)]
return valid_f
#filter default columns in case they are not present
feats2check = [x for x in feats2check if x in df]
#filter default columns in case they are not present. Same for the subdivision dictionary.
subdivision_dict_r = {}
for e_subdivide, feats2subdivide in subdivision_dict.items():
ff = [x for x in feats2check if x in feats2subdivide]
if e_subdivide in df and ff:
subdivision_dict_r[e_subdivide] = ff
subdivision_dict = subdivision_dict_r
#subdivide a feature using the event features
subdivided_df = _get_subdivided_features(df, subdivision_dict = subdivision_dict)
df = df.join(subdivided_df)
feats2check += subdivided_df.columns.tolist()
if is_remove_subdivided:
df = df[[x for x in df if not x in feats2subdivide]]
feats2check = [x for x in feats2check if x not in feats2subdivide]
#add normalized features
if is_normalize:
df, changed_feats = _normalize_by_w_length(df, feats2norm = feats2norm)
feats2check = [x if not x in changed_feats else changed_feats[x] for x in feats2check]
#abs features that are ventral/dorsal side
if is_abs_ventral:
feats2abs = _filter_ventral_features(feats2check)
#find features that match ventral_signed_columns
if feats2abs:
#normalize
if df.size > 0:
df[feats2abs] = df[feats2abs].abs()
#change name
df.columns = [x + '_abs' if x in feats2abs else x for x in df.columns]
feats2check = [x + '_abs' if x in feats2abs else x for x in feats2check]
#calculate quantiles
feat_mean = None
Q = df[feats2check].quantile(valid_q)
feat_mean = pd.concat((feat_mean, Q), axis=1)
#name correctly
dat = []
for q in q_vals:
q_dat = feat_mean.loc[q]
q_str = '_{}th'.format(int(round(q*100)))
for feat, val in q_dat.iteritems():
dat.append((val, feat+q_str))
IQR = feat_mean.loc[0.75] - feat_mean.loc[0.25]
dat += [(val, feat + '_IQR') for feat, val in IQR.iteritems()]
feat_mean_s = pd.Series(*list(zip(*dat)))
return feat_mean_s
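# The returned series is keyed by feature name plus a percentile suffix, e.g.
# a single feature 'speed' yields 'speed_10th', 'speed_50th', 'speed_90th' and
# 'speed_IQR' (75th minus 25th percentile). A toy call (illustrative only;
# like the module itself it assumes a pandas version with Series.iteritems):
def _demo_df_quantiles():
    toy = pd.DataFrame({'speed': np.arange(10, dtype=float)})
    return get_df_quantiles(toy, feats2check=['speed'], is_abs_ventral=False)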
def _get_subdivided_features(timeseries_data, subdivision_dict):
'''
subdivision_dict = {event_v1: [feature_v1, feature_v2, ...], event_v2: [feature_vn ...], ...}
event_vector = [-1, -1, 0, 0, 1, 1]
feature_vector = [1, 3, 4, 5, 6, 6]
new_vectors ->
[1, 3, nan, nan, nan, nan]
[nan, nan, 4, 5, nan, nan]
[nan, nan, nan, nan, 6, 6]
'''
#assert all the subdivision keys are known events
assert all(x in event_region_labels.keys() for x in subdivision_dict)
event_type_link = {#%%
'food_region' : '_in_',
'motion_mode' : '_w_'
}
subdivided_data = []
for e_col, timeseries_cols in subdivision_dict.items():
e_data = timeseries_data[e_col].values
if e_col in event_type_link:
str_l = event_type_link[e_col]
else:
str_l = '_'
for flag, label in event_region_labels[e_col].items():
_flag = e_data != flag
for f_col in timeseries_cols:
f_data = timeseries_data[f_col].values.copy()
try:
f_data[_flag] = np.nan
except:
import pdb
pdb.set_trace()
new_name = f_col + str_l + label
subdivided_data.append((new_name, f_data))
if not subdivided_data:
#return empty df if nothing was subdivided
return pd.DataFrame([])
columns, data = zip(*subdivided_data)
subdivided_df = pd.DataFrame(np.array(data).T, columns = columns)
subdivided_df.index = timeseries_data.index
return subdivided_df
def process_blob_data(blob_features, derivate_delta_time, fps):
'''
Filter only the selected features and add derivatives
'''
assert not ((blob_features is None) and (derivate_delta_time is None))
assert all(x in blob_features for x in index_colums)
#add the blob prefix to the blob features if it is not present
filt_func = lambda x : (not x.startswith('blob_') and not (x in index_colums))
blob_features.columns = ['blob_' + x if filt_func(x) else x for x in blob_features.columns ]
#add blob derivatives
derivate_delta_frames = get_delta_in_frames(derivate_delta_time, fps)
blob_l = []
for w_ind, blob_w in blob_features.groupby('worm_index'):
blob_w = add_derivatives(blob_w, blob_feats_columns, derivate_delta_frames, fps)
blob_l.append(blob_w)
if blob_l:
blob_features = pd.concat(blob_l, axis=0)
#select only the valid columns
blob_feats_columns_d = blob_feats_columns + ['d_' + x for x in blob_feats_columns]
blob_cols = [x for x in blob_feats_columns_d if x in blob_features]
blob_features = blob_features[blob_cols]
else:
blob_features, blob_cols = pd.DataFrame([]), []
return blob_features, blob_cols
def check_if_event_features(selected_feat):
if selected_feat is None:
return True
for ft in selected_feat:
if np.any([x in ft for x in event_columns]):
return True
return False
def check_if_path_extent_features(selected_feat):
path_extent_cols = ['path_coverage', 'path_density', 'path_transit']
if selected_feat is None:
return True
for ft in selected_feat:
if np.any([x in ft for x in path_extent_cols]):
return True
return False
def check_if_blob_features(selected_feat):
if selected_feat is None:
return True
for ft in selected_feat:
if 'blob' in ft:
return True
return False
def select_timeseries(
timeseries_feats_columns, ventral_signed_columns, feats2normalize,
selected_feat):
if selected_feat is None:
ts_cols_all, v_sign_cols, feats2norm = \
timeseries_feats_columns, ventral_signed_columns, feats2normalize
else:
ts_cols_all = [ts for ts in timeseries_feats_columns
if np.any([ts in x for x in selected_feat])]
v_sign_cols = list(set(ventral_signed_columns) & set(ts_cols_all))
feats2norm = dict()
for key in feats2normalize.keys():
feats2norm[key] = list(set(feats2normalize[key]) & set(ts_cols_all))
ts_cols_norm = sum(feats2norm.values(), [])
return ts_cols_all, v_sign_cols, feats2norm, ts_cols_norm
def get_summary_stats(timeseries_data,
fps,
blob_features = None,
derivate_delta_time = None,
only_abs_ventral = False,
selected_feat = None
):
if timeseries_data.size == 0:
return pd.DataFrame([])
ts_cols_all, v_sign_cols, feats2norm, ts_cols_norm = select_timeseries(
timeseries_feats_columns, ventral_signed_columns, feats2normalize,
selected_feat)
#summarize everything
exp_feats = []
## event features
# EM: check if event features need to be calculated:
is_event_features = check_if_event_features(selected_feat)
if is_event_features:
n_worms_estimate = get_n_worms_estimate(timeseries_data['timestamp'])
event_stats_s = get_event_stats(timeseries_data, fps , n_worms_estimate)
else:
event_stats_s = | pd.Series() | pandas.Series |
#!/usr/bin/env python3
# Pancancer_Aberrant_Pathway_Activity_Analysis scripts/alternative_genes_pathwaymapper.py
import os
import sys
import pandas as pd
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_auc_score, average_precision_score
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'papaa'))
from tcga_util import add_version_argument
def get_gene_auroc(x, w):
score = roc_auc_score(x, w, average='weighted')
return(score)
def get_gene_auprc(x, w):
score = average_precision_score(x, w, average='weighted')
return(score)
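# Small sanity check of the two helpers above with made-up labels and scores
# (illustrative only; never called by the pipeline). Both metrics are 1.0 here
# because every positive example outranks every negative one.
def _demo_gene_metrics():
    _y = pd.Series([0, 1, 1, 0, 1])
    _w = pd.Series([0.1, 0.8, 0.7, 0.4, 0.9])
    return get_gene_auroc(_y, _w), get_gene_auprc(_y, _w)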
# argument passing
parser = argparse.ArgumentParser()
add_version_argument(parser)
parser.add_argument('-s', '--classifier_decisions',
help='string of the location of classifier decisions file with predictions/scores')
parser.add_argument('-g', '--genes', default= 'ERBB2,PIK3CA,KRAS,AKT1',
help='string of the genes to extract or genelist file')
parser.add_argument('-p', '--path_genes',
help='pathway gene list file')
parser.add_argument( '--filename_mut', default=None,
help='Filename of sample/gene mutations to use in model')
parser.add_argument( '--filename_sample', default=None,
help='Filename of patient/samples to use in model')
parser.add_argument('-c', '--copy_number', action='store_true',
help='optional flag to include copy number info in pathway map')
parser.add_argument( '--filename_copy_loss', default=None,
help='Filename of copy number loss')
parser.add_argument( '--filename_copy_gain', default=None,
help='Filename of copy number gain')
args = parser.parse_args()
scores = args.classifier_decisions
path_genes = args.path_genes
copy_number = args.copy_number
# if list of the genes provided by file or comma separated values:
try:
genes = args.genes
genes_df = | pd.read_table(genes) | pandas.read_table |
from unittest import TestCase
from nose_parameterized import parameterized
from collections import OrderedDict
import os
import gzip
from pandas import (
Series,
DataFrame,
date_range,
Timestamp,
read_csv
)
from pandas.util.testing import assert_frame_equal
from numpy import (
arange,
zeros_like,
nan,
)
import warnings
from pyfolio.utils import (to_utc, to_series, check_intraday,
detect_intraday, estimate_intraday)
from pyfolio.pos import (get_percent_alloc,
extract_pos,
get_sector_exposures,
get_max_median_position_concentration)
class PositionsTestCase(TestCase):
dates = date_range(start='2015-01-01', freq='D', periods=20)
def test_get_percent_alloc(self):
raw_data = arange(15, dtype=float).reshape(5, 3)
# Make the first column negative to test absolute magnitudes.
raw_data[:, 0] *= -1
frame = DataFrame(
raw_data,
index=date_range('01-01-2015', freq='D', periods=5),
columns=['A', 'B', 'C']
)
result = get_percent_alloc(frame)
expected_raw = zeros_like(raw_data)
for idx, row in enumerate(raw_data):
expected_raw[idx] = row / row.sum()
expected = DataFrame(
expected_raw,
index=frame.index,
columns=frame.columns,
)
assert_frame_equal(result, expected)
def test_extract_pos(self):
index_dup = [Timestamp('2015-06-08', tz='UTC'),
Timestamp('2015-06-08', tz='UTC'),
Timestamp('2015-06-09', tz='UTC'),
Timestamp('2015-06-09', tz='UTC')]
index = [ | Timestamp('2015-06-08', tz='UTC') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""EDA with Visualization.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Anp_qii2EQ2tJDUBUSE4PNXOcLJpaS0v
# **SpaceX Falcon 9 First Stage Landing Prediction**
## Assignment: Exploring and Preparing Data
Estimated time needed: **70** minutes
In this assignment, we will predict if the Falcon 9 first stage will land successfully. SpaceX advertises Falcon 9 rocket launches on its website with a cost of 62 million dollars; other providers cost upward of 165 million dollars each. Much of the savings is due to the fact that SpaceX can reuse the first stage.
In this lab, you will perform Exploratory Data Analysis and Feature Engineering.
Falcon 9 first stage will land successfully

Several examples of an unsuccessful landing are shown here:

Most unsuccessful landings are planned: SpaceX performs a controlled landing in the ocean.
## Objectives
Perform exploratory Data Analysis and Feature Engineering using `Pandas` and `Matplotlib`
* Exploratory Data Analysis
* Preparing Data Feature Engineering
***
### Import Libraries and Define Auxiliary Functions
We will import the following libraries the lab
"""
# Pandas is a software library written for the Python programming language for data manipulation and analysis.
import pandas as pd
#NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays
import numpy as np
# Matplotlib is a plotting library for python and pyplot gives us a MatLab like plotting framework. We will use this in our plotter function to plot data.
import matplotlib.pyplot as plt
#Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics
import seaborn as sns
"""## Exploratory Data Analysis
First, let's read the SpaceX dataset into a Pandas dataframe and print its summary
"""
#df=pd.read_csv("https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/datasets/dataset_part_2.csv")
# If you were unable to complete the previous lab correctly you can uncomment and load this csv
df = pd.read_csv('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DS0701EN-SkillsNetwork/api/dataset_part_2.csv')
df.head(5)
"""First, let's try to see how the `FlightNumber` (indicating the continuous launch attempts.) and `Payload` variables would affect the launch outcome.
We can plot out the <code>FlightNumber</code> vs. <code>PayloadMass</code> and overlay the outcome of the launch. We see that as the flight number increases, the first stage is more likely to land successfully. The payload mass also matters: it seems that the more massive the payload, the less likely the first stage will return.
"""
sns.catplot(y="PayloadMass", x="FlightNumber", hue="Class", data=df, aspect = 5)
plt.xlabel("Flight Number",fontsize=20)
plt.ylabel("Pay load Mass (kg)",fontsize=20)
plt.show()
"""We see that different launch sites have different success rates. <code>CCAFS LC-40</code>, has a success rate of 60 %, while <code>KSC LC-39A</code> and <code>VAFB SLC 4E</code> has a success rate of 77%.
Next, let's drill down to each site visualize its detailed launch records.
### TASK 1: Visualize the relationship between Flight Number and Launch Site
Use the function <code>catplot</code> to plot <code>FlightNumber</code> vs <code>LaunchSite</code>, set the parameter <code>x</code> parameter to <code>FlightNumber</code>,set the <code>y</code> to <code>Launch Site</code> and set the parameter <code>hue</code> to <code>'class'</code>
"""
# Plot a scatter point chart with x axis to be Flight Number and y axis to be the launch site, and hue to be the class value
sns.catplot(y="LaunchSite", x="FlightNumber", hue="Class", data=df, aspect = 5)
plt.xlabel("Flight Number",fontsize=20)
plt.ylabel("Launch Site",fontsize=20)
plt.show()
"""Now try to explain the patterns you found in the Flight Number vs. Launch Site scatter point plots.
### TASK 2: Visualize the relationship between Payload and Launch Site
We also want to observe if there is any relationship between launch sites and their payload mass.
"""
# Plot a scatter point chart with x axis to be Pay Load Mass (kg) and y axis to be the launch site, and hue to be the class value
sns.catplot(y="PayloadMass", x="LaunchSite", hue="Class", data=df, aspect = 5)
plt.xlabel("Launch Site",fontsize=20)
plt.ylabel("Pay load Mass (kg)",fontsize=20)
plt.show()
"""Now try to explain any patterns you found in the Payload Vs. Launch Site scatter point chart.
### TASK 3: Visualize the relationship between success rate of each orbit type
Next, we want to visually check if there are any relationship between success rate and orbit type.
Let's create a `bar chart` for the sucess rate of each orbit
"""
# HINT use groupby method on Orbit column and get the mean of Class column
temp = df.groupby(["Orbit"]).mean().reset_index()
temp2 = temp[["Orbit", "Class"]]
temp2["Class"] = temp2["Class"]*100
sns.barplot(x = "Orbit", y = "Class", data = temp2)
"""Analyze the ploted bar chart try to find which orbits have high sucess rate.
### TASK 4: Visualize the relationship between FlightNumber and Orbit type
For each orbit, we want to see if there is any relationship between FlightNumber and Orbit type.
"""
# Plot a scatter point chart with x axis to be FlightNumber and y axis to be the Orbit, and hue to be the class value
sns.catplot(y="Orbit", x="FlightNumber", hue="Class", data=df, aspect = 5)
plt.xlabel("FlightNumber",fontsize=20)
plt.ylabel("Orbit",fontsize=20)
plt.show()
"""You should see that in the LEO orbit the Success appears related to the number of flights; on the other hand, there seems to be no relationship between flight number when in GTO orbit.
### TASK 5: Visualize the relationship between Payload and Orbit type
Similarly, we can plot the Payload vs. Orbit scatter point charts to reveal the relationship between Payload and Orbit type
"""
# Plot a scatter point chart with x axis to be Payload and y axis to be the Orbit, and hue to be the class value
sns.catplot(y="Orbit", x="PayloadMass", hue="Class", data=df, aspect = 5)
plt.xlabel("PayloadMass",fontsize=20)
plt.ylabel("Orbit",fontsize=20)
plt.show()
"""You should observe that Heavy payloads have a negative influence on GTO orbits and positive on GTO and Polar LEO (ISS) orbits.
### TASK 6: Visualize the launch success yearly trend
You can plot a line chart with x axis to be <code>Year</code> and y axis to be average success rate, to get the average launch success trend.
The function will help you get the year from the date:
"""
# A function to Extract years from the date
def Extract_year(year):
for i in df["Date"]:
year.append(i.split("-")[0])
return year
# Plot a line chart with x axis to be the extracted year and y axis to be the success rate
year = []
df["year"] = Extract_year(year)
df["Success Rate"] = df["Class"] * 100
sns.lineplot(data = df, x = "year", y = "Success Rate")
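# seaborn aggregates repeated x values by their mean, so the plot above already
# shows the average success rate per year; the same aggregation made explicit
# (illustrative cross-check only):
yearly_rate = df.groupby("year")["Success Rate"].mean()
print(yearly_rate)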
"""you can observe that the sucess rate since 2013 kept increasing till 2020
## Features Engineering
By now, you should obtain some preliminary insights about how each important variable would affect the success rate, we will select the features that will be used in success prediction in the future module.
"""
features = df[['FlightNumber', 'PayloadMass', 'Orbit', 'LaunchSite', 'Flights', 'GridFins', 'Reused', 'Legs', 'LandingPad', 'Block', 'ReusedCount', 'Serial']]
features.head()
"""### TASK 7: Create dummy variables to categorical columns
Use the function <code>get_dummies</code> on the <code>features</code> dataframe to apply one-hot encoding (OneHotEncoder) to the columns <code>Orbit</code>, <code>LaunchSite</code>, <code>LandingPad</code>, and <code>Serial</code>. Assign the result to the variable <code>features_one_hot</code> and display it using the <code>head</code> method. Your resulting dataframe must include all features, including the encoded ones.
"""
# HINT: Use get_dummies() function on the categorical columns
oh_orbit = pd.get_dummies(features["Orbit"])
oh_launch = pd.get_dummies(features["LaunchSite"])
oh_landing = | pd.get_dummies(features["LandingPad"]) | pandas.get_dummies |
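# A possible completion of the one-hot encoding step above (sketch only: the
# variable names oh_serial and features_one_hot are assumptions, not taken
# from the original notebook):
# oh_serial = pd.get_dummies(features["Serial"])
# features_one_hot = pd.concat(
#     [features.drop(["Orbit", "LaunchSite", "LandingPad", "Serial"], axis=1),
#      oh_orbit, oh_launch, oh_landing, oh_serial], axis=1)
# features_one_hot.head()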
#%%
import os
import sys
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
import connectome_tools.cluster_analysis as clust
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
adj = pm.Promat.pull_adj('ad', subgraph='brain and accessory')
edges = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
pairs = pm.Promat.get_pairs()
dVNCs = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_pairs = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids_bothsides', skids=dVNCs, use_skids=True)
# %%
# dVNC projectome data prep
import cmasher as cmr
projectome = pd.read_csv('data/projectome/projectome_adjacency.csv', index_col = 0, header = 0)
projectome.index = [str(x) for x in projectome.index]
# identify meshes
meshes = ['Brain Hemisphere left', 'Brain Hemisphere right', 'SEZ_left', 'SEZ_right', 'T1_left', 'T1_right', 'T2_left', 'T2_right', 'T3_left', 'T3_right', 'A1_left', 'A1_right', 'A2_left', 'A2_right', 'A3_left', 'A3_right', 'A4_left', 'A4_right', 'A5_left', 'A5_right', 'A6_left', 'A6_right', 'A7_left', 'A7_right', 'A8_left', 'A8_right']
pairOrder_dVNC = [x for sublist in zip(dVNC_pairs.leftid, dVNC_pairs.rightid) for x in sublist]
input_projectome = projectome.loc[meshes, [str(x) for x in pairOrder_dVNC]]
output_projectome = projectome.loc[[str(x) for x in pairOrder_dVNC], meshes]
dVNC_projectome_pairs_summed_output = []
indices = []
for i in np.arange(0, len(output_projectome.index), 2):
combined_pairs = (output_projectome.iloc[i, :] + output_projectome.iloc[i+1, :])
combined_hemisegs = []
for j in np.arange(0, len(combined_pairs), 2):
combined_hemisegs.append((combined_pairs[j] + combined_pairs[j+1]))
dVNC_projectome_pairs_summed_output.append(combined_hemisegs)
indices.append(output_projectome.index[i])
dVNC_projectome_pairs_summed_output = pd.DataFrame(dVNC_projectome_pairs_summed_output, index = indices, columns = ['brain','SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'])
#dVNC_projectome_pairs_summed_output = dVNC_projectome_pairs_summed_output.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
#normalize # of presynaptic sites
dVNC_projectome_pairs_summed_output_norm = dVNC_projectome_pairs_summed_output.copy()
for i in range(len(dVNC_projectome_pairs_summed_output)):
sum_row = sum(dVNC_projectome_pairs_summed_output_norm.iloc[i, :])
for j in range(len(dVNC_projectome_pairs_summed_output.columns)):
dVNC_projectome_pairs_summed_output_norm.iloc[i, j] = dVNC_projectome_pairs_summed_output_norm.iloc[i, j]/sum_row
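# Vectorized cross-check of the row-normalisation loop above: dividing each row
# by its own sum reproduces dVNC_projectome_pairs_summed_output_norm (assuming
# no all-zero rows). Illustrative only.
_norm_check = dVNC_projectome_pairs_summed_output.div(
    dVNC_projectome_pairs_summed_output.sum(axis=1), axis=0)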
# remove brain from columns
dVNC_projectome_pairs_summed_output_norm_no_brain = dVNC_projectome_pairs_summed_output_norm.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
dVNC_projectome_pairs_summed_output_no_brain = dVNC_projectome_pairs_summed_output.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
# %%
# ordering and plotting
# sorting with normalized data
sort_threshold = 0
dVNC_projectome_pairs_summed_output_sort_norm = dVNC_projectome_pairs_summed_output_norm_no_brain.copy()
dVNC_projectome_pairs_summed_output_sort_norm[dVNC_projectome_pairs_summed_output_sort_norm<sort_threshold]=0
order = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']
order.reverse()
dVNC_projectome_pairs_summed_output_sort_norm.sort_values(by=order, ascending=False, inplace=True)
sort = dVNC_projectome_pairs_summed_output_sort_norm.index
cmap = plt.cm.get_cmap('Blues') # modify 'Blues' cmap to have a white background
blue_cmap = cmap(np.linspace(0, 1, 20))
blue_cmap[0] = np.array([1, 1, 1, 1])
blue_cmap = mpl.colors.LinearSegmentedColormap.from_list(name='New_Blues', colors=blue_cmap)
cmap = blue_cmap
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.heatmap(dVNC_projectome_pairs_summed_output_norm.loc[sort, :], ax=ax, cmap=cmap)
plt.savefig(f'VNC_interaction/plots/projectome/A8-T1_sort_projectome_normalized_sortThres{sort_threshold}.pdf', bbox_inches='tight')
# sorting with raw data
sort_threshold = 0
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_no_brain.copy()
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<sort_threshold]=0
order = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']
order.reverse()
dVNC_projectome_pairs_summed_output_sort.sort_values(by=order, ascending=False, inplace=True)
sort = dVNC_projectome_pairs_summed_output_sort.index
vmax = 70
cmap = blue_cmap
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.heatmap(dVNC_projectome_pairs_summed_output.loc[sort, :], ax=ax, cmap=cmap, vmax=vmax)
plt.savefig(f'VNC_interaction/plots/projectome/A8-T1_sort_projectome_sortThres{sort_threshold}.pdf', bbox_inches='tight')
# %%
# old prototype code; lots of conflicting ordering sections added for testing purposes
'''
# order based on clustering raw data
cluster = sns.clustermap(dVNC_projectome_pairs_summed_output, col_cluster = False, figsize=(6,4))
row_order = cluster.dendrogram_row.reordered_ind
#fig, ax = plt.subplots(figsize=(6,4))
#sns.heatmap(dVNC_projectome_pairs_summed_output.iloc[row_order, :], rasterized=True, ax=ax)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_raw.pdf', bbox_inches='tight')
# order based on clustering normalized data
cluster = sns.clustermap(dVNC_projectome_pairs_summed_output_norm, col_cluster = False, figsize=(6,4), rasterized=True)
row_order = cluster.dendrogram_row.reordered_ind
#fig, ax = plt.subplots(figsize=(6,4))
#sns.heatmap(dVNC_projectome_pairs_summed_output_norm.iloc[row_order, :], rasterized=True, ax=ax)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_normalized.pdf', bbox_inches='tight')
# order based on counts per column
for i in range(1, 51):
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_norm.copy()
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<(i/100)]=0
dVNC_projectome_pairs_summed_output_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order = dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort.sum(axis=1)>0].index
second_sort = dVNC_projectome_pairs_summed_output_norm[dVNC_projectome_pairs_summed_output_sort.sum(axis=1)==0]
second_sort[second_sort<.1]=0
second_sort.sort_values(by=[i for i in reversed(['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'])], ascending=False, inplace=True)
row_order = list(row_order) + list(second_sort.index)
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(dVNC_projectome_pairs_summed_output_norm.loc[row_order, :], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
for i in range(1, 51):
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output.copy()
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<(i)]=0
dVNC_projectome_pairs_summed_output_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order = dVNC_projectome_pairs_summed_output_sort.index
second_sort = dVNC_projectome_pairs_summed_output[dVNC_projectome_pairs_summed_output_sort.sum(axis=1)==0]
second_sort[second_sort<10]=0
second_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order = list(row_order) + list(second_sort.index)
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(dVNC_projectome_pairs_summed_output.loc[row_order, :], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/projectome_{i}-sort-threshold.pdf', bbox_inches='tight')
fig, ax = plt.subplots(figsize=(3,2))
sns.heatmap(dVNC_projectome_pairs_summed_output.iloc[row_order, :], ax=ax)
plt.savefig('VNC_interaction/plots/projectome/output_projectome_cluster.pdf', bbox_inches='tight', transparent = True)
# order input projectome in the same way
dVNC_projectome_pairs_summed_input = []
indices = []
for i in np.arange(0, len(input_projectome.columns), 2):
combined_pairs = (input_projectome.iloc[:, i] + input_projectome.iloc[:, i+1])
combined_hemisegs = []
for j in np.arange(0, len(combined_pairs), 2):
combined_hemisegs.append((combined_pairs[j] + combined_pairs[j+1]))
dVNC_projectome_pairs_summed_input.append(combined_hemisegs)
indices.append(input_projectome.columns[i])
dVNC_projectome_pairs_summed_input = pd.DataFrame(dVNC_projectome_pairs_summed_input, index = indices, columns = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'])
dVNC_projectome_pairs_summed_input = dVNC_projectome_pairs_summed_input.iloc[:, 1:len(dVNC_projectome_pairs_summed_input)]
#cluster = sns.clustermap(dVNC_projectome_pairs_summed_input, col_cluster = False, cmap = cmr.freeze, figsize=(10,10))
fig, ax = plt.subplots(figsize=(3,2))
sns.heatmap(dVNC_projectome_pairs_summed_input.iloc[row_order, :], cmap=cmr.freeze, ax=ax)
plt.savefig('VNC_interaction/plots/projectome/input_projectome_cluster.pdf', bbox_inches='tight', transparent = True)
'''
# %%
# paths 2-hop upstream of each dVNC
from tqdm import tqdm
# sort dVNC pairs
sort = [int(x) for x in sort]
dVNC_pairs.set_index('leftid', drop=False, inplace=True)
dVNC_pairs = dVNC_pairs.loc[sort, :]
dVNC_pairs.reset_index(inplace=True, drop=True)
hops = 2
threshold = 0.01
dVNC_pair_paths_us = [pm.Promat.upstream_multihop(edges=edges, sources=dVNC_pairs.loc[i].to_list(), hops=hops) for i in tqdm(range(0, len(dVNC_pairs)))]
dVNC_pair_paths_ds = [pm.Promat.downstream_multihop(edges=edges, sources=dVNC_pairs.loc[i].to_list(), hops=hops) for i in tqdm(range(0, len(dVNC_pairs)))]
# %%
# plotting individual dVNC paths
_, celltypes = ct.Celltype_Analyzer.default_celltypes()
skids_list = [list(adj.index)] + [x.get_skids() for x in celltypes]
# UPSTREAM
all_layers_us = [ct.Celltype_Analyzer.layer_id(dVNC_pair_paths_us, dVNC_pairs.leftid, skids_type)[0] for skids_type in skids_list]
layer_names = ['Total'] + [x.get_name() for x in celltypes]
threshold = 0.01
layer_colors = ['Greens', 'Greens', 'Blues', 'Greens', 'Oranges', 'Reds', 'Greens', 'Blues', 'Purples', 'Blues', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds']
layer_vmax = [200, 50, 50, 50, 50, 50, 50, 50, 50, 50, 100, 50, 50, 50, 50, 50, 50]
save_path = 'VNC_interaction/plots/dVNC_partners/Upstream_'
ct.Celltype_Analyzer.plot_layer_types(layer_types=all_layers_us, layer_names=layer_names, layer_colors=layer_colors,
layer_vmax=layer_vmax, pair_ids=dVNC_pairs.leftid, figsize=(.5*hops/3, 1.5), save_path=save_path, threshold=threshold, hops=hops)
# DOWNSTREAM
all_layers_ds = [ct.Celltype_Analyzer.layer_id(dVNC_pair_paths_ds, dVNC_pairs.leftid, skids_type)[0] for skids_type in skids_list]
layer_names = ['Total'] + [x.get_name() for x in celltypes]
threshold = 0.01
layer_colors = ['Greens', 'Greens', 'Blues', 'Greens', 'Oranges', 'Reds', 'Greens', 'Blues', 'Purples', 'Blues', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds']
layer_vmax = [200, 50, 50, 50, 50, 50, 50, 50, 50, 50, 100, 50, 50, 50, 50, 50, 50]
save_path = 'VNC_interaction/plots/dVNC_partners/Downstream-in-brain_'
ct.Celltype_Analyzer.plot_layer_types(layer_types=all_layers_ds, layer_names=layer_names, layer_colors=layer_colors,
layer_vmax=layer_vmax, pair_ids=dVNC_pairs.leftid, figsize=(.5*hops/3, 1.5), save_path=save_path, threshold=threshold, hops=hops)
# %%
# make bar plots for 1-hop and 2-hop
_, celltypes = ct.Celltype_Analyzer.default_celltypes()
figsize = (2,0.5)
# UPSTREAM
us_1order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_us_1o', x[0]) for i, x in enumerate(dVNC_pair_paths_us)])
us_2order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_us_2o', x[1]) for i, x in enumerate(dVNC_pair_paths_us)])
us_1order.set_known_types(celltypes)
us_2order.set_known_types(celltypes)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_1st_order_upstream.pdf'
us_1order.plot_memberships(path = path, figsize=figsize)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_2nd_order_upstream.pdf'
us_2order.plot_memberships(path = path, figsize=figsize)
# DOWNSTREAM
ds_1order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_ds_1o', x[0]) for i, x in enumerate(dVNC_pair_paths_ds)])
ds_2order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_ds_2o', x[1]) for i, x in enumerate(dVNC_pair_paths_ds)])
ds_1order.set_known_types(celltypes)
ds_2order.set_known_types(celltypes)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_1st_order_downstream.pdf'
ds_1order.plot_memberships(path = path, figsize=figsize)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_2nd_order_downstream.pdf'
ds_2order.plot_memberships(path = path, figsize=figsize)
# %%
# combine all data types for dVNCs: us1o, us2o, ds1o, ds2o, projectome
fraction_cell_types_1o_us = pd.DataFrame([x.iloc[:, 0] for x in fraction_types], index = fraction_types_names).T
fraction_cell_types_1o_us.columns = [f'1o_us_{x}' for x in fraction_cell_types_1o_us.columns]
unk_col = 1-fraction_cell_types_1o_us.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_1o_us['1o_us_unk']=unk_col
fraction_cell_types_2o_us = pd.DataFrame([x.iloc[:, 1] for x in fraction_types], index = fraction_types_names).T
fraction_cell_types_2o_us.columns = [f'2o_us_{x}' for x in fraction_cell_types_2o_us.columns]
unk_col = 1-fraction_cell_types_2o_us.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_2o_us['2o_us_unk']=unk_col
fraction_cell_types_1o_ds = pd.DataFrame([x.iloc[:, 0] for x in fraction_types_ds], index = fraction_types_names).T
fraction_cell_types_1o_ds.columns = [f'1o_ds_{x}' for x in fraction_cell_types_1o_ds.columns]
unk_col = 1-fraction_cell_types_1o_ds.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_1o_ds['1o_ds_unk']=unk_col
fraction_cell_types_1o_ds[fraction_cell_types_1o_ds==-1]=0
fraction_cell_types_2o_ds = pd.DataFrame([x.iloc[:, 1] for x in fraction_types_ds], index = fraction_types_names).T
fraction_cell_types_2o_ds.columns = [f'2o_ds_{x}' for x in fraction_cell_types_2o_ds.columns]
unk_col = 1-fraction_cell_types_2o_ds.sum(axis=1)
unk_col[unk_col==11]=0
fraction_cell_types_2o_ds['2o_ds_unk']=unk_col
fraction_cell_types_2o_ds[fraction_cell_types_2o_ds==-1]=0
all_data = dVNC_projectome_pairs_summed_output_norm.copy()
all_data.index = [int(x) for x in all_data.index]
all_data = pd.concat([fraction_cell_types_1o_us, fraction_cell_types_2o_us, all_data, fraction_cell_types_1o_ds, fraction_cell_types_2o_ds], axis=1)
all_data.fillna(0, inplace=True)
# clustered version of all_data combined
cluster = sns.clustermap(all_data, col_cluster = False, figsize=(30,30), rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data.pdf', bbox_inches='tight')
order = cluster.dendrogram_row.reordered_ind
fig,ax=plt.subplots(1,1,figsize=(6,4))
sns.heatmap(all_data.iloc[order, :].drop(list(fraction_cell_types_1o_us.columns) + list(fraction_cell_types_2o_us.columns) + list(fraction_cell_types_1o_ds.columns) + list(fraction_cell_types_2o_ds.columns), axis=1), ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data_same_size.pdf', bbox_inches='tight')
cluster = sns.clustermap(all_data.drop(['1o_us_pre-dVNC', '2o_us_pre-dVNC'], axis=1), col_cluster = False, figsize=(20,15), rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data_removed_us-pre-dVNCs.pdf', bbox_inches='tight')
# decreasing sort of all_data but with feedback and non-feedback dVNC clustered
for i in range(1, 50):
dVNCs_with_FB = all_data.loc[:, list(fraction_cell_types_1o_ds.columns) + list(fraction_cell_types_2o_ds.columns)].sum(axis=1)
dVNCs_FB_true_skids = dVNCs_with_FB[dVNCs_with_FB>0].index
dVNCs_FB_false_skids = dVNCs_with_FB[dVNCs_with_FB==0].index
dVNC_projectome_pairs_summed_output_sort = all_data.copy()
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_sort.loc[:, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']]
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_sort.loc[dVNCs_FB_true_skids]
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<(i/100)]=0
dVNC_projectome_pairs_summed_output_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order_FB_true = dVNC_projectome_pairs_summed_output_sort.index
second_sort = all_data.copy()
second_sort = second_sort.loc[:, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']]
second_sort = second_sort.loc[dVNCs_FB_false_skids]
second_sort[second_sort<(i/100)]=0
second_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order_FB_false = second_sort.index
row_order = list(row_order_FB_true) + list(row_order_FB_false)
fig, ax = plt.subplots(figsize=(20, 15))
sns.heatmap(all_data.loc[row_order, :], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/splitFB_projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(all_data.loc[row_order, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/splitFB_same-size_projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
# %%
# what fraction of us and ds neurons are from different cell types per hop?
fraction_cell_types_1o_us = pd.DataFrame([x.iloc[:, 0] for x in fraction_types], index = fraction_types_names)
fraction_cell_types_1o_us = fraction_cell_types_1o_us.fillna(0) # one dVNC with no inputs
fraction_cell_types_2o_us = pd.DataFrame([x.iloc[:, 1] for x in fraction_types], index = fraction_types_names)
fraction_cell_types_2o_us = fraction_cell_types_2o_us.fillna(0) # one dVNC with no inputs
fraction_cell_types_1o_us_scatter = []
for j in range(1, len(fraction_cell_types_1o_us.columns)):
for i in range(0, len(fraction_cell_types_1o_us.index)):
fraction_cell_types_1o_us_scatter.append([fraction_cell_types_1o_us.iloc[i, j], fraction_cell_types_1o_us.index[i]])
fraction_cell_types_1o_us_scatter = | pd.DataFrame(fraction_cell_types_1o_us_scatter, columns = ['fraction', 'cell_type']) | pandas.DataFrame |
# coding: utf-8
# # PuLP testing
# In[32]:
import pulp
# Import PuLP modeler functions
from pulp import *
from funcs import store_namespace
from funcs import load_namespace
from funcs import emulate_jmod
import os
import datetime
import time
import pandas as pd
#from multiprocessing import Pool
from mpcpy import units
from mpcpy import variables
from mpcpy import models_mod as models
from Simulator_HP_mod3 import SimHandler
from pulp_funcs import *
# In[116]:
community = 'ResidentialCommunityUK_rad_2elements'
sim_id = 'MinEne'
model_id = 'R2CW_HP'
arx_model = 'ARX_lag_4_exog4'
bldg_list = load_namespace(os.path.join('path_to_folder', 'teaser_bldgs_residential'))
folder = 'path_to_folder'
bldg_index_start = 0
bldg_index_end = 30
# Overall options
date = '11/20/2017 '
start = date + '16:30:00'
end = date + '19:00:00'
meas_sampl = '300'
horizon = 3.0*3600.0/float(meas_sampl) #time horizon for optimization in multiples of the sample
mon = 'nov'
DRstart = datetime.datetime.strptime(date + '18:00:00', '%m/%d/%Y %H:%M:%S') # hour to start DR - ramp down 30 mins before
DRend = datetime.datetime.strptime(date + '18:30:00', '%m/%d/%Y %H:%M:%S') # hour to end DR - ramp 30 mins later
DR_call_start = datetime.datetime.strptime(date + '17:00:00', '%m/%d/%Y %H:%M:%S') # Round of loop to implement the call
DR_ramp_start = datetime.datetime.strptime(date + '17:30:00', '%m/%d/%Y %H:%M:%S')
DR_ramp_end = datetime.datetime.strptime(date + '18:00:00', '%m/%d/%Y %H:%M:%S') # Round of loop to stop implementing the call
# reduction of demand
compr_capacity_list=[float(4500.0)]*10+[float(3000.0)]*20
tot_cap = sum(compr_capacity_list)
print(compr_capacity_list)
print(tot_cap)
ramp_modifier = float(200.0) * len(bldg_list) # to further modify the load profile
max_modifier = float(200.0) * len(bldg_list)
print(ramp_modifier)
# Pricing
dyn_price = 0
stat_cost = 50
flex_cost = 100 # Utilisation cost for flexibility
rho = 500 # Cost of comfort violations
#incentive_dr = 150 # One-off Incentive paid for commitment
lag = 13 # Number of delay terms to take from measurements
power_lag = 3 # Lag for last control action
temp_lag1 = 4 # Continuous immediate lag of temps for optimisation
temp_lag2 = 13 # Lag from further away for temps
sim_range = pd.date_range(start, end, freq = meas_sampl+'S')
opt_start_str = start
opt_end = datetime.datetime.strptime(end, '%m/%d/%Y %H:%M:%S') + datetime.timedelta(seconds = horizon*int(meas_sampl))
opt_end_str = opt_end.strftime('%m/%d/%Y %H:%M:%S')
if mon == 'jan':
init_start = sim_range[0] - datetime.timedelta(seconds = 1.5*3600)
else:
init_start = sim_range[0] - datetime.timedelta(seconds = 4.5*3600)
init_start_str = init_start.strftime('%m/%d/%Y %H:%M:%S')
print(init_start_str)
Sim_list = []
i = 0
for bldg in bldg_list[bldg_index_start:bldg_index_end]:
i = i+1
Sim = SimHandler(sim_start = start,
sim_end = end,
meas_sampl = meas_sampl
)
Sim.building = bldg+'_'+model_id
Sim.compr_capacity = compr_capacity_list[i-1]
Sim.fmupath_emu = os.path.join(Sim.simu_path, 'fmus', community, community+'_'+bldg+'_'+bldg+'_Models_'+bldg+'_House_mpc.fmu')
Sim.fmupath_ref = os.path.join(Sim.simu_path, 'fmus', community, community+'_'+bldg+'_'+bldg+'_Models_'+bldg+'_House_PI.fmu')
Sim.moinfo_emu = (os.path.join(Sim.mod_path, community, bldg,bldg+'_Models',bldg+'_House_mpc.mo'), community+'.'+bldg+'.'+bldg+'_Models.'+bldg+'_House_mpc',
{}
)
Sim.moinfo_emu_ref = (os.path.join(Sim.mod_path, community, bldg,bldg+'_Models',bldg+'_House_PI.mo'), community+'.'+bldg+'.'+bldg+'_Models.'+bldg+'_House_PI',
{}
)
# Initialise exogenous data sources
if i == 1:
Sim.update_weather(init_start_str, opt_end_str)
Sim.get_DRinfo(init_start_str,opt_end_str)
index = pd.date_range(start, opt_end_str, freq = meas_sampl+'S', tz=Sim.weather.tz_name)
Sim.price = load_namespace(os.path.join(Sim.simu_path, 'JournalPaper', 'drcases', 'decentr_costmin_30bldgs_'+mon, 'sim_price'))
load_profile_aggr = pd.Series(0,index)
if dyn_price == 0:
price_signal = pd.Series(stat_cost, index)
Sim.price.data = {"pi_e": variables.Timeseries('pi_e', price_signal,units.cents_kWh,tz_name=Sim.weather.tz_name)
}
store_namespace(os.path.join(folder, 'sim_price'), Sim.price)
else:
Sim.weather = Sim_list[i-2].weather
Sim.ref_profile = Sim_list[i-2].ref_profile
Sim.flex_cost = Sim_list[i-2].flex_cost
Sim.price = Sim_list[i-2].price
#Sim.rho = Sim_list[i-2].rho
#Sim.addobj = Sim_list[i-2].addobj
#Sim.sim_start= '1/1/2017 00:00'
Sim.get_control()
#Sim.sim_start= start
Sim.get_other_input(init_start_str,opt_end_str)
Sim.get_constraints(init_start_str,opt_end_str,upd_control=1)
#Sim.param_file = os.path.join(Sim.simu_path,'csvs','Parameters_R2CW.csv')
#Sim.get_params()
#Sim.parameters.data = load_namespace(os.path.join(Sim.simu_path, 'sysid', 'sysid_HPrad_2element_'+mon+'_600S','est_params_'+Sim.building))
Sim.other_input = load_namespace(os.path.join(Sim.simu_path, 'JournalPaper', 'drcases', 'decentr_enemin_constr_'+mon, 'other_input_'+Sim.building))
Sim.constraints = load_namespace(os.path.join(Sim.simu_path, 'JournalPaper', 'drcases', 'decentr_enemin_constr_'+mon, 'constraints_'+Sim.building))
# Add to list of simulations
Sim_list.append(Sim)
# Initialise models
Sim.init_models(use_ukf=1, use_fmu_mpc=0, use_fmu_emu=1) # Use for initialising
# In[136]:
# Get ARX model
for Sim in Sim_list:
Sim.ARX_model = load_namespace(os.path.join(Sim.simu_path, 'JournalPaper', 'drcases', 'results_sysid_new_'+mon, arx_model, 'sysid_ARXmodel_'+mon+'_'+Sim.building))
# In[137]:
# Initialise models
for Sim in Sim_list:
emulate_jmod(Sim.emu, Sim.meas_vars_emu, Sim.meas_sampl, init_start_str, start)
Sim.start_temp = Sim.emu.display_measurements('Measured').values[-1][-1]
print(Sim.emu.display_measurements('Measured'))
print(Sim.start_temp-273.15)
# In[138]:
# Start the loop
i = 0
emutemps = {}
mpctemps = {}
controlseq = {}
power = {}
opt_stats = {}
emu_stats = {}
for simtime in sim_range:
i = i + 1
print('%%%%%%%%% IN LOOP: ' + str(i) + ' %%%%%%%%%%%%%%%%%')
if i == 1:
#simtime_str = simtime.strftime('%m/%d/%Y %H:%M:%S')
simtime_str = 'continue'
else:
simtime_str = 'continue'
opt_start_str = simtime.strftime('%m/%d/%Y %H:%M:%S')
opt_end = simtime + datetime.timedelta(seconds = horizon*int(Sim.meas_sampl))
emu_end = simtime + datetime.timedelta(seconds = int(Sim.meas_sampl))
opt_end_str = opt_end.strftime('%m/%d/%Y %H:%M:%S')
emu_end_str = emu_end.strftime('%m/%d/%Y %H:%M:%S')
simtime_naive = simtime.replace(tzinfo=None)
print('---- Simulation time: ' + str(simtime) + ' -------')
print('---- Next time step: ' + str(emu_end) + ' -------')
print('---- Optimisation horizon end: ' + str(opt_end) + ' -------')
emutemps = {}
mpctemps = {}
controlseq = {}
power = {}
opt_stats = {}
emu_stats = {}
opt_index = pd.date_range(opt_start_str, opt_end_str, freq = meas_sampl+'S')
# Shift to load tracking - down flexibility
if simtime == DR_call_start:
print('%%%%%%%%%%%%%%% DR event called - flexibility profile defined %%%%%%%%%%%%%%%%%%%%%')
#load_profile = Sim.opt_controlseq['HPPower'].display_data()
#load_profile = controlseq[opt_start_str_prev][Sim.building]
#Sim.constraints = Sim.constraints_down
flex_cost_signal = | pd.Series(0,index=index) | pandas.Series |
import typing
import warnings
import logging
import uuid
import os
from tqdm import tqdm
import pandas as pd
import imgaug
import cv2
from ..hashers import ImageHasher, tools
from ..tools import deduplicate, flatten
from .common import BenchmarkTransforms, BenchmarkDataset, BenchmarkHashes
# pylint: disable=invalid-name
log = logging.getLogger(__name__)
class BenchmarkImageTransforms(BenchmarkTransforms):
def compute_hashes(self,
hashers: typing.Dict[str, ImageHasher],
max_workers: int = 5) -> BenchmarkHashes:
"""Compute hashes for a series of files given some set of hashers.
Args:
hashers: A dictionary of hashers.
max_workers: Maximum number of workers for parallel hash
computation.
Returns:
metrics: A BenchmarkHashes object.
"""
hashsets = []
filepaths = self._df['filepath']
for hasher_name, hasher in hashers.items():
hash_dicts = hasher.compute_parallel(
filepaths,
progress=tqdm,
progress_desc=f'Computing hashes for {hasher_name}',
max_workers=max_workers)
if not hasher.returns_multiple:
hashes_df = | pd.DataFrame.from_records(hash_dicts) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
import pandas as pd
INCIDENCE_BASE = 100000
# https://code.activestate.com/recipes/577775-state-fips-codes-dict/
STATE_TO_FIPS = {
"WA": "53",
"DE": "10",
"DC": "11",
"WI": "55",
"WV": "54",
"HI": "15",
"FL": "12",
"WY": "56",
"PR": "72",
"NJ": "34",
"NM": "35",
"TX": "48",
"LA": "22",
"NC": "37",
"ND": "38",
"NE": "31",
"TN": "47",
"NY": "36",
"PA": "42",
"AK": "02",
"NV": "32",
"NH": "33",
"VA": "51",
"CO": "08",
"CA": "06",
"AL": "01",
"AR": "05",
"VT": "50",
"IL": "17",
"GA": "13",
"IN": "18",
"IA": "19",
"MA": "25",
"AZ": "04",
"ID": "16",
"CT": "09",
"ME": "23",
"MD": "24",
"OK": "40",
"OH": "39",
"UT": "49",
"MO": "29",
"MN": "27",
"MI": "26",
"RI": "44",
"KS": "20",
"MT": "30",
"MS": "28",
"SC": "45",
"KY": "21",
"OR": "41",
"SD": "46",
}
SECONDARY_FIPS = [
("51620", ["51093", "51175"]),
("51685", ["51153"]),
("28039", ["28059", "28041", "28131", "28045", "28059", "28109", "28047"]),
("51690", ["51089", "51067"]),
("51595", ["51081", "51025", "51175", "51183"]),
("51600", ["51059", "51059", "51059"]),
("51580", ["51005"]),
("51678", ["51163"]),
]
REPLACE_FIPS = [
("02158", "02270"),
("46102", "46113"),
]
FIPS_TO_STATE = {v: k.lower() for k, v in STATE_TO_FIPS.items()}
def fips_to_state(fips: str) -> str:
"""Wrapper that handles exceptions to the FIPS scheme in the CDS data.
The two known exceptions to the FIPS encoding are documented in the CDS
case data README. All other county FIPS codes are mapped to state by
taking the first two digits of the five digit, zero-padded county FIPS
and applying FIPS_TO_STATE to map it to the two-letter postal
abbreviation.
Parameters
----------
fips: str
Five digit, zero padded county FIPS code
Returns
-------
str
Two-letter postal abbreviation, lower case.
Raises
------
KeyError
Inputted FIPS code not recognized.
"""
return FIPS_TO_STATE[fips[:2]]
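# Illustrative example (not part of the original module): any county FIPS code
# beginning with "53" maps to Washington, e.g. fips_to_state("53011") == "wa".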
def disburse(df: pd.DataFrame, pooled_fips: str, fips_list: list):
"""Disburse counts from POOLED_FIPS equally to the counties in FIPS_LIST.
Parameters
----------
df: pd.DataFrame
Columns: fips, timestamp, new_counts, cumulative_counts, ...
pooled_fips: str
FIPS of county from which to disburse counts
fips_list: list[str]
FIPS of counties to which to disburse counts.
    Returns
    -------
pd.DataFrame
Dataframe with same schema as df, with the counts disbursed.
"""
COLS = ["new_counts", "cumulative_counts"]
df = df.copy().sort_values(["fips", "timestamp"])
for col in COLS:
# Get values from the aggregated county:
vals = df.loc[df["fips"] == pooled_fips, col].values / len(fips_list)
for fips in fips_list:
df.loc[df["fips"] == fips, col] += vals
return df
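# Hypothetical sketch of disburse() on a single date (all values invented):
# >>> demo = pd.DataFrame({"fips": ["51620", "51093", "51175"],
# ...                      "timestamp": ["2020-04-01"] * 3,
# ...                      "new_counts": [10.0, 1.0, 2.0],
# ...                      "cumulative_counts": [20.0, 2.0, 4.0]})
# >>> disburse(demo, "51620", ["51093", "51175"])
# Each listed county receives 10.0 / 2 = 5.0 extra new_counts (giving 6.0 and
# 7.0); the pooled row itself is left unchanged.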
def geo_map(df: pd.DataFrame, geo_res: str, map_df: pd.DataFrame):
"""
Maps a DataFrame df, which contains data at the county resolution, and
aggregate it to the geographic resolution geo_res.
Parameters
----------
df: pd.DataFrame
Columns: fips, timestamp, new_counts, cumulative_counts, population ...
geo_res: str
Geographic resolution to which to aggregate. Valid options:
('county', 'state', 'msa', 'hrr').
map_df: pd.DataFrame
Loaded from static file "fips_prop_pop.csv".
Returns
-------
pd.DataFrame
Columns: geo_id, timestamp, ...
"""
VALID_GEO_RES = ("county", "state", "msa", "hrr")
if geo_res not in VALID_GEO_RES:
raise ValueError(f"geo_res must be one of {VALID_GEO_RES}")
df = df.copy()
if geo_res == "county":
df["geo_id"] = df["fips"]
elif geo_res == "state":
# Grab first two digits of fips
# Map state fips to us postal code
df["geo_id"] = df["fips"].apply(fips_to_state)
elif geo_res in ("msa", "hrr"):
# Map "missing" secondary FIPS to those that are in our canonical set
for fips, fips_list in SECONDARY_FIPS:
df = disburse(df, fips, fips_list)
for cds_fips, our_fips in REPLACE_FIPS:
df.loc[df["fips"] == cds_fips, "fips"] = our_fips
colname = "cbsa_id" if geo_res == "msa" else "hrrnum"
map_df = map_df.loc[~ | pd.isnull(map_df[colname]) | pandas.isnull |
import logging as _logging
import numpy as _np
import pandas as _pd
from gn_lib.gn_const import J2000_ORIGIN as _J2000_ORIGIN, C_LIGHT as _C_LIGHT, SISRE_COEF_DF as _SISRE_COEF_DF
from gn_lib.gn_io.common import path2bytes as _path2bytes
from gn_lib.gn_io.sp3 import diff_sp3_rac as _diff_sp3_rac, read_sp3 as _read_sp3
from gn_lib.gn_io.ionex import read_ionex as _read_ionex
from gn_lib.gn_io.clk import read_clk as _read_clk
from gn_lib.gn_io.sinex import _get_snx_vector
from gn_lib.gn_io.stec import read_stec as _read_stec
from gn_lib.gn_io.trace import _read_trace_residuals, _read_trace_states
from gn_lib.gn_io.pod import read_pod_out as _read_pod_out
from gn_lib.gn_datetime import j20002datetime as _j20002datetime, datetime2gpsweeksec as _datetime2gpsweeksec
from gn_lib.gn_plot import diff2plot as _diff2plot
def _valvar2diffstd(valvar1,valvar2,trace=True,std_coeff=1):
    df = _pd.concat([valvar1,valvar2],axis=0,keys=['valvar1','valvar2']).unstack(0)  # fastest
df_nd = df.values
diff = df_nd[:,0] - df_nd[:,1]
nan_mask = ~_np.isnan(diff)
diff = diff[nan_mask]
std = std_coeff*_np.sqrt((df_nd[:,3] + df_nd[:,2])[nan_mask])
df_combo = _pd.DataFrame(_np.vstack([diff,std]).T,columns=['DIFF','STD'],index=df.index[nan_mask])
if trace:
sats = df.index.get_level_values('SAT')
sats_mask = ~sats.isna()
sats_df = sats[sats_mask].unique()
df_combo.attrs['SAT_MASK'] = sats_mask[nan_mask]
sats_common = sats[sats_mask & nan_mask]#.unique()
df_combo.attrs['EXTRA_SATS'] = sats_df[~sats_df.isin(sats_common)].to_list() # is [] if none
return df_combo
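# Worked example of the DIFF/STD convention above (numbers are illustrative):
# for an epoch where valvar1 holds (value 1.00, variance 0.04) and valvar2
# holds (value 0.70, variance 0.05), the output row is
# DIFF = 1.00 - 0.70 = 0.30 and STD = std_coeff * sqrt(0.04 + 0.05) = 0.30
# (with the default std_coeff = 1).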
def _diff2msg(diff, tol = None, dt_as_gpsweek=False):
_pd.set_option("display.max_colwidth", 10000)
from_valvar = _np.all(_np.isin(['DIFF','STD'],diff.columns.get_level_values(0).values))
if from_valvar: #if from_valvar else diff.values
diff_df = diff.DIFF
std_df = diff.STD
std_vals = std_df.values if tol is None else tol
else:
diff_df = diff
assert tol is not None, 'tol can not be None if STD info is missing'
std_vals = tol
count_total = (~_np.isnan(diff_df.values)).sum(axis=0)
mask2d_over_threshold = _np.abs(diff_df) > std_vals
diff_count = mask2d_over_threshold.sum(axis=0)
mask = diff_count.astype(bool)
if mask.sum() == 0:
return None
mask_some_vals = mask[mask.values].index
diff_over = diff_df[mask2d_over_threshold][mask_some_vals]
idx_max = diff_over.idxmax()
diff_max = _pd.Series(_np.diag(diff_over.loc[idx_max.values].values),index=idx_max.index)
idx_min = diff_over.idxmin()
diff_min = _pd.Series(_np.diag(diff_over.loc[idx_min.values].values),index=idx_min.index)
if from_valvar:
std_over = std_df[mask2d_over_threshold][mask_some_vals]
std_max = _pd.Series(_np.diag(std_over.loc[idx_max.values].values),index=idx_max.index)
std_min = _pd.Series(_np.diag(std_over.loc[idx_min.values].values),index=idx_min.index)
msg = | _pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import json as json
import os
select_columns = ['AGE', 'BBTYPE', 'ETHNICITY', 'GENDER', 'LOCATION', 'SAMPLEID']
def bb_sample_names():
bb_samples_df = pd.read_csv("belly_button_biodiversity_samples.csv")
bb_names_list = list(bb_samples_df.columns.values)[1:]
return bb_names_list
def otu_desc():
otu_df = pd.read_csv("belly_button_biodiversity_otu_id.csv")
return list(otu_df['lowest_taxonomic_unit_found'])
def get_metadata_sample(sample):
meta_df = pd.read_csv("Belly_Button_Biodiversity_Metadata.csv")
sample_id = int(sample.lower().strip('bb_'))
return meta_df.loc[(meta_df['SAMPLEID'] == sample_id)][select_columns].to_dict('records')
def get_wfreq_sample(sample):
meta_df = | pd.read_csv("Belly_Button_Biodiversity_Metadata.csv") | pandas.read_csv |
#########################################################
### DNA variant annotation tool
### Version 1.0.0
### By <NAME>
### <EMAIL>
#########################################################
import pandas as pd
import numpy as np
import allel
import argparse
import subprocess
import sys
import os.path
import pickle
import requests
import json
def extract_most_deleterious_anno(row, num_ann_max):
ann_order = pd.read_csv(anno_order_file, sep=' ')
alt = row[:num_ann_max]
anno = row[num_ann_max:]
alt.index = range(0, len(alt))
anno.index = range(0, len(anno))
ann_all_alt = pd.DataFrame()
alt_unique = alt.unique()
for unique_alt in alt_unique:
if unique_alt != '':
anno_all = anno[alt == unique_alt]
ann_order_all = pd.DataFrame()
for ann_any in anno_all:
if sum(ann_any == ann_order.Anno) > 0:
ann_any_order = ann_order[ann_order.Anno == ann_any]
else:
ann_any_order = ann_order.iloc[ann_order.shape[0]-1]
ann_order_all = ann_order_all.append(ann_any_order)
small_ann = ann_order_all.sort_index(ascending=True).Anno.iloc[0]
ann_unique_alt = [unique_alt, small_ann]
ann_all_alt = ann_all_alt.append(ann_unique_alt)
ann_all_alt.index = range(0, ann_all_alt.shape[0])
return ann_all_alt.T
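# Illustrative behaviour, assuming anno_order_file ranks 'missense_variant' as
# more deleterious than 'synonymous_variant' (the real ranking comes from that
# file): with num_ann_max = 2 and
# row = ['A', 'A', 'missense_variant', 'synonymous_variant']
# only 'missense_variant' is kept for alternate allele 'A', i.e. the most
# deleterious annotation per unique ALT allele survives.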
def run_snpeff(temp_out_name):
snpeff_command = ['java', '-Xmx4g', '-jar', snpeff_path, \
'-ud', '0', \
# '-v', \
'-canon', '-noStats', \
ref_genome, vcf_file]
temp_output = open(temp_out_name, 'w')
subprocess.run(snpeff_command, stdout=temp_output)
temp_output.close()
def get_max_num_ann(temp_out_name):
num_ann_guess = 500
callset = allel.vcf_to_dataframe(temp_out_name, fields='ANN', numbers={'ANN': num_ann_guess})
num_ann = callset.apply(lambda x: sum(x != ''), axis=1)
num_ann_max = num_ann.max() # num_ann_max = 175
return num_ann_max
def get_ann_from_output_snpeff(temp_out_name):
callset = allel.read_vcf(temp_out_name, fields='ANN', transformers=allel.ANNTransformer(), \
numbers={'ANN': num_ann_max})
df1 = pd.DataFrame(data=callset['variants/ANN_Allele'])
df2 = pd.DataFrame(data=callset['variants/ANN_Annotation'])
df3 = pd.concat((df1, df2), axis=1)
df3.columns = range(0, df3.shape[1])
return df3
def get_anno_total(anno_from_snpeff):
anno_total = pd.DataFrame()
pickle_dump = 'pickle_dump.temp'
if not os.path.isfile(pickle_dump):
print('Extracting most deleterious annotations generated by SnpEff')
for index, row in anno_from_snpeff.iterrows():
anno_row = extract_most_deleterious_anno(row, num_ann_max)
anno_total = anno_total.append(anno_row)
print('done')
dump_file = open(pickle_dump, 'wb')
pickle.dump(anno_total, dump_file, pickle.HIGHEST_PROTOCOL)
dump_file.close()
dump_file = open(pickle_dump, 'rb')
anno_total = pickle.load(dump_file)
a = ['Alt_' + str(i) for i in range(1, num_alt + 1)]
b = ['Anno_' + str(i) for i in range(1, num_alt + 1)]
c = list(range(0, num_alt * 2))
c[::2] = a
c[1::2] = b
anno_total.columns = c
anno_total.replace(np.nan, -1, inplace=True)
anno_total.index = range(0, anno_total.shape[0])
return anno_total
def get_num_alternate(vcf_file):
num_alt = allel.read_vcf(vcf_file, fields='numalt')['variants/numalt'].max()
return num_alt
def get_dp_ro_ao(temp_out_name):
callset_dp_ro_ao = allel.vcf_to_dataframe(temp_out_name, fields=['DP', 'RO', 'AO'], alt_number=num_alt)
callset_dp_ro_ao.index = range(0, callset_dp_ro_ao.shape[0])
return callset_dp_ro_ao
def get_alt_ref_ratio(callset_dp_ro_ao):
callset_ratio = pd.DataFrame()
for i in range(0, num_alt):
# print('run ratio: ', i)
callset_ratio[i] = callset_dp_ro_ao.apply(lambda x: x[i + 2] / x[1], axis=1)
# print('run ratio: ', i, ' done')
# print('callset_ratio is done')
callset_ratio.columns = ['RatioAR_Alt_' + str(i) for i in range(1, num_alt + 1)]
callset_ratio.index = range(0, callset_ratio.shape[0])
return callset_ratio
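# Worked example for one site (values invented): with DP = 100, RO = 60 and
# AO_1 = 40, RatioAR_Alt_1 = AO_1 / RO = 40 / 60 ~= 0.67, i.e. the ratio of
# alternate-supporting reads to reference-supporting reads.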
def combine_anno_and_callset(anno_total, callset_dp_ro_ao, callset_ratio, ExAC_variant_af, ExAC_variant_ordered_csqs):
anno_and_callset = pd.concat([anno_total, callset_dp_ro_ao, callset_ratio, ExAC_variant_af, ExAC_variant_ordered_csqs], axis=1)
return anno_and_callset
def combine_with_comma(row):
a = []
for i in range(0, len(row)):
if row.iloc[i][0] != '-':
a.append(True)
else:
a.append(False)
b = ",".join(row[a])
return b
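# Illustrative input/output for combine_with_comma(): entries whose first
# character is '-' are treated as empty slots and dropped, e.g.
# pd.Series(['A|x', '-1', 'T|y']) -> 'A|x,T|y'.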
def get_anno_good(anno_and_callset):
anno_columns = pd.DataFrame()
for i in range(1, num_alt + 1):
Alt_i = 'Alt_' + str(i)
Anno_i = 'Anno_' + str(i)
AO_i = 'AO_' + str(i)
RatioAR_Alt_i = 'RatioAR_Alt_' + str(i)
exac_var_af = 'exac_' + search_af + "_" + str(i)
exac_ordered_csqs = 'exac_' + search_ordered_csqs + '_' + str(i)
column_i = anno_and_callset[[Alt_i, Anno_i, 'DP', 'RO', AO_i, RatioAR_Alt_i, exac_var_af, exac_ordered_csqs]].apply(lambda x: '|'.join(x.map(str)), axis=1)
anno_columns = pd.concat([anno_columns, column_i], axis=1)
anno_one_column = anno_columns.apply(combine_with_comma, axis=1)
anno_good = ["ANN="] * len(anno_one_column) + anno_one_column
return anno_good
def get_num_lines_header(contents):
lines_header = 0
for i in range(0, len(contents)):
if contents[i][0] == '#' and contents[i + 1][0] != '#':
# print(contents[i])
# print(i)
lines_header = i # lines_header 142
return lines_header
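# Example of the intended behaviour (line counts depend on the input VCF): if
# lines 0..141 of the file start with '#' and line 142 is the first data
# record, lines_header becomes 141 -- the index of the final '#CHROM' header
# line -- which generate_output_vcf() later passes as skiprows so that pandas
# uses that line as the column header.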
def generate_output_vcf(vcf_file, anno_good):
input_vcf = pd.read_csv(vcf_file, sep='\t', skiprows=lines_header)
anno_good_all = input_vcf.INFO + ';' + anno_good
input_vcf.INFO = anno_good_all
output_vcf = input_vcf.copy()
return output_vcf
def generate_header(contents):
header = contents[0:lines_header]
header_add1 = """##SimpleAnnotation Version="0.0.1" By <NAME> <EMAIL> \n"""
header_add2 = """##SimpleAnnotation Cmd="python3 SimpleAnnotation.py -input {} -snpeff {} -genome {} "\n""".format(vcf_file, snpeff_path, ref_genome)
header_add3 = """##INFO=<ID=ANN,Number=.,Type=String, Description="Simple annotations: 'Alternate allele | Type of variation most deleterious | Sequence depth at the site of variation | Number of reads of reference | Number of reads of alternate | Ratio of read counts of alt vs ref | ExAC variant Allele Frequency | ExAC variant consequence most deleterious' ">\n"""
header.append(header_add1)
header.append(header_add2)
header.append(header_add3)
return header
def search_REST_ExAC(row, search_type):
row_var = [-1] * len(row)
url_1 = 'http://exac.hms.harvard.edu/rest/variant/{}/'.format(search_type)
for i in range(0, len(row)):
if row.iloc[i][-1] != '-':
url = url_1 + row.iloc[i]
my_response = requests.get(url)
if my_response.ok:
j_data = json.loads(my_response.content)
if search_type == search_af:
if 'allele_freq' in j_data.keys():
row_var[i] = j_data['allele_freq']
else:
row_var[i] = 'Not_found'
elif search_type == search_ordered_csqs:
if j_data != None and len(j_data) > 1:
row_var[i] = j_data[1]
else:
row_var[i] = 'Not_found'
else:
row_var[i] = 'Not_found'
return row_var
def ExAC_search_variant(var_all, search_type):
exac = pd.DataFrame()
counter = 0
print('There are {} variants that need to be searched. This will take a while.'.format(var_all.shape[0]))
for index, row in var_all.iterrows():
af_row = search_REST_ExAC(row, search_type)
exac = pd.concat([exac, pd.DataFrame(af_row)], axis=1)
counter += 1
if counter%500 == 0:
print(counter)
exac = exac.T
exac.index = range(0, exac.shape[0])
exac.columns = ['exac_' + search_type + '_' + str(i) for i in range(1, num_alt + 1)]
return exac
def generate_var_id_for_exac(vcf_file):
callset = allel.vcf_to_dataframe(vcf_file, fields=['CHROM', 'POS', 'REF', 'ALT'], alt_number=num_alt)
var_all = | pd.DataFrame() | pandas.DataFrame |
import pandas
import numpy as np
import matplotlib.pyplot as plt
def get_from_pie_plot(df, minimum_emails=25):
df["from"].value_counts()
dict_values = np.array(list(df["from"].value_counts().to_dict().values()))
dict_keys = np.array(list(df["from"].value_counts().to_dict().keys()))
ind = dict_values > minimum_emails
dict_values_red = dict_values[ind].tolist()
dict_keys_red = dict_keys[ind].tolist()
dict_values_red.append(sum(dict_values[~ind]))
dict_keys_red.append("other")
fig1, ax1 = plt.subplots()
ax1.pie(dict_values_red, labels=dict_keys_red)
ax1.axis("equal")
plt.show()
def get_labels_pie_plot(gmail, df):
label_lst = []
for llst in df.labels.values:
for ll in llst:
label_lst.append(ll)
label_lst = list(set(label_lst))
label_lst = [label for label in label_lst if "Label_" in label]
label_count_lst = [
sum([True if label_select in label else False for label in df.labels])
for label_select in label_lst
]
convert_dict = {
v: k
for v, k in zip(
list(gmail._label_dict.values()), list(gmail._label_dict.keys())
)
}
label_convert_lst = [convert_dict[label] for label in label_lst]
ind = np.argsort(label_count_lst)
fig1, ax1 = plt.subplots()
ax1.pie(
np.array(label_count_lst)[ind][::-1],
labels=np.array(label_convert_lst)[ind][::-1],
)
ax1.axis("equal")
plt.show()
def get_number_of_email_plot(df, steps=8):
start_month = [d.year * 12 + d.month for d in | pandas.to_datetime(df.date) | pandas.to_datetime |
import pandas as pd
import os, sys
from eternabench.stats import calculate_Z_scores
package_list=['vienna_2', 'vienna_2_60C', 'rnastructure', 'rnastructure_60C', 'rnasoft_blstar','contrafold_2','eternafold_B']
external_dataset_types = pd.read_csv(os.environ['ETERNABENCH_PATH']+'/eternabench/external_dataset_metadata.csv')
RNA_CLASSES = list(external_dataset_types.Class.unique())
EB_CM_bootstraps=pd.DataFrame()
for pkg in package_list:
tmp = pd.read_json(os.environ['ETERNABENCH_PATH']+'/data/ChemMapping/bootstraps/CM_pearson_Dataset_%s_BOOTSTRAPS.json.zip' % pkg)
EB_CM_bootstraps = EB_CM_bootstraps.append(tmp, ignore_index=True)
EB_CM_bootstraps = EB_CM_bootstraps.loc[EB_CM_bootstraps.Dataset=='RYOS_I']
EB_CM_bootstraps['Dataset'] = 'Leppek,2021 In-line-seq'
net_dataset_zscore_stats=pd.DataFrame()
net_ranking = | pd.DataFrame() | pandas.DataFrame |
# -----------------------------------------------------------------------------
# WWW 2019 Debiasing Vandalism Detection Models at Wikidata
#
# Copyright (c) 2019 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from numpy.core import getlimits
from sklearn.base import TransformerMixin
class BooleanImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None, **fit_params):
result = X.astype(np.float32)
result = result.fillna(0.5)
return pd.DataFrame(result)
class CumFrequencyTransformer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
# assumption: X is ordered by revisionId
grouped = X.groupby(by=list(X.columns))
result = grouped.cumcount() + 1
result = result.to_frame()
return result
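# Illustrative behaviour (single made-up column): for input values
# ['P31', 'P21', 'P31', 'P31'] the transformer returns [1, 1, 2, 3] -- the
# running count of how often each identical row has been seen so far, which
# relies on the frame being ordered by revisionId as noted above.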
class FrequencyTransformer(TransformerMixin):
"""Transforms categorical features to a numeric value (frequency).
Given a data frame with columns (C1, C2, ..., Cn), computes for each
unique tuple (c1, c2, ..., cn), how often it appears in the data frame.
For example, it counts how many revisions were done with this predicate
on the training set (one column C1='predicate').
"""
def __init__(self):
self.__frequencies = None
def fit(self, X, y=None):
self.__frequencies = X.groupby(by=list(X.columns)).size()
self.__frequencies.name = 'frequencies'
return self
def transform(self, X):
result = X.join(self.__frequencies, on=list(X.columns), how='left')
# all other frequencies are at least 1
result = result['frequencies'].fillna(0)
result = result.to_frame()
return result
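# Illustrative behaviour (column name invented): fitting on
# {'predicate': ['P31', 'P31', 'P21']} and then transforming
# {'predicate': ['P31', 'P999']} yields frequencies 2 and 0, since 'P999'
# never appeared in the training data.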
class InfinityImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
result = X
for column in X.columns:
datatype = result.loc[:, column].dtype.type
limits = getlimits.finfo(datatype)
result.loc[:, column].replace(np.inf, limits.max, inplace=True)
result.loc[:, column].replace(-np.inf, limits.min, inplace=True)
return result
class LogTransformer(TransformerMixin):
"""Compute the formula sign(X)*ceil(log2(|X|+1))"""
def fit(self, X, y=None):
return self
def transform(self, X):
result = X
sign = result.apply(np.sign)
result = result.apply(np.absolute)
result = result + 1
result = result.apply(np.log2)
result = result.apply(np.ceil)
result = sign * result
result = result.fillna(0)
return result
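# Worked examples of sign(X)*ceil(log2(|X|+1)):
# X = 7 -> ceil(log2(8)) = 3; X = 1 -> 1; X = 0 -> 0; X = -3 -> -ceil(log2(4)) = -2.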
class MedianImputer(TransformerMixin):
def __init__(self):
self.__median = None
def fit(self, X, y=None):
self.__median = X.median()
return self
def transform(self, X):
result = X.fillna(self.__median)
result = InfinityImputer().fit_transform(result)
return result
class MinusOneImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
result = X.fillna(-1)
result = InfinityImputer().fit_transform(result)
return result
class EqualsTransformer(TransformerMixin):
def __init__(self, value):
self.__value = value
def fit(self, X, y=None):
return self
def transform(self, X):
# value is assumed to be a tuple
result = [True] * len(X)
for i in range(len(self.__value)):
result = result & self.isNanEqual(X.iloc[:, i], self.__value[i])
result = | pd.DataFrame(result) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy.stats import pearsonr, spearmanr, mannwhitneyu
from scripts.python.routines.manifest import get_manifest
from scripts.python.EWAS.routines.correction import correct_pvalues
from tqdm import tqdm
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
folder_name = f"proteomics"
path_save = f"{path}/meta/tasks/{folder_name}"
tissues = ['Brain', 'Liver', 'Blood']
platform = 'GPL13534'
manifest = get_manifest(platform)
for tissue in tissues:
tmp_path = f"{path_save}/{tissue}"
betas = | pd.read_pickle(f"{tmp_path}/betas.pkl") | pandas.read_pickle |
import unittest
from unittest import mock
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas.util.testing import assert_frame_equal
import tests.test_data as td
from shift_detector.checks.statistical_checks import numerical_statistical_check, categorical_statistical_check
from shift_detector.checks.statistical_checks.categorical_statistical_check import CategoricalStatisticalCheck
from shift_detector.checks.statistical_checks.numerical_statistical_check import NumericalStatisticalCheck
from shift_detector.checks.statistical_checks.text_metadata_statistical_check import TextMetadataStatisticalCheck
from shift_detector.detector import Detector
from shift_detector.precalculations.store import Store
from shift_detector.precalculations.text_metadata import NumCharsMetadata, NumWordsMetadata, \
DistinctWordsRatioMetadata, LanguagePerParagraph, UnknownWordRatioMetadata, StopwordRatioMetadata, LanguageMetadata
from shift_detector.utils.visualization import PlotData
class TestTextMetadataStatisticalCheck(unittest.TestCase):
def setUp(self):
self.poems = td.poems
self.phrases = td.phrases
def test_significant_metadata(self):
pvalues = pd.DataFrame([[0.001, 0.2]], columns=['num_chars', 'distinct_words_ratio'], index=['pvalue'])
result = TextMetadataStatisticalCheck(significance=0.01).significant_metadata_names(pvalues)
self.assertIn('num_chars', result)
self.assertNotIn('distinct_words_ratio', result)
def test_not_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': list(reversed(self.poems))})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck().run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(0, len(result.shifted_columns))
self.assertEqual(0, len(result.explanation))
def test_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': self.phrases})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck([NumCharsMetadata(), NumWordsMetadata(),
DistinctWordsRatioMetadata(), LanguagePerParagraph()]
).run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(1, len(result.shifted_columns))
self.assertEqual(1, len(result.explanation))
def test_compliance_with_detector(self):
df1 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
df2 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
detector = Detector(df1=df1, df2=df2, log_print=False)
detector.run(TextMetadataStatisticalCheck())
column_index = pd.MultiIndex.from_product([['text'], ['distinct_words', 'num_chars', 'num_words']],
names=['column', 'metadata'])
solution = pd.DataFrame([[1.0, 1.0, 1.0]], columns=column_index, index=['pvalue'])
self.assertEqual(1, len(detector.check_reports[0].examined_columns))
self.assertEqual(0, len(detector.check_reports[0].shifted_columns))
self.assertEqual(0, len(detector.check_reports[0].explanation))
assert_frame_equal(solution, detector.check_reports[0].information['test_results'])
def test_language_can_be_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], language='fr')
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertEqual('fr', mdtype.language)
def test_infer_language_is_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], infer_language=True)
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertTrue(mdtype.infer_language)
def test_figure_function_is_collected(self):
df1 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
df2 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
metadata_names = ['num_chars', 'num_words']
cols = | pd.MultiIndex.from_product([df1.columns, metadata_names], names=['column', 'metadata']) | pandas.MultiIndex.from_product |
# Functions and classes for visualization
def plot_by_factor(df, factor, colors, showplot=False):
    ''' Plot by factor on an already constructed
    t-SNE plot.
'''
import matplotlib.pyplot as plt
listof = {} # this gets numbers to get the colors right
listnames = []
for i, j in enumerate(df[factor].unique()):
listof[j] = i
listnames.append(j)
df[factor] = df[factor].map(listof)
f, ax = plt.subplots(figsize=(15,10))
for a, i in enumerate(df[factor].unique()):
ax.scatter(df[df[factor] == i][0],
df[df[factor] == i][1],
color=colors[i], label=listnames[a])
ax.legend()
ax.set_title('t-SNE colored by {}'.format(factor))
if showplot == True:
plt.show()
else:
f.savefig('images/{}.png'.format(factor))
class AnalyzeClusters(object):
def __init__(self):
pass
def make_dataset(self, sales_df, clus_df):
sales_df['sku_key'] = sales_df['sku_key'].astype(int)
self.c_dfs = {}
for i in clus_df['cluster'].unique():
self.c_dfs['cluster_{}'.format(i)] = clus_df[clus_df['cluster'] == i]
for i in self.c_dfs.keys():
self.c_dfs[i] = self.c_dfs[i].merge(sales_df, on='sku_key')
return self.c_dfs
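    # Sketch of the returned structure (cluster labels depend on clus_df):
    # {'cluster_0': <sales rows for skus in cluster 0>,
    #  'cluster_1': <sales rows for skus in cluster 1>, ...}
    # where each value is the cluster's rows of clus_df merged with sales_df on 'sku_key'.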
def plot_median_timeseries(self, cluster_dfs, variable='sales', split=False):
import pandas as pd
import matplotlib.pyplot as plt
for i, j in cluster_dfs.items():
df = pd.DataFrame(j[[variable, 'tran_date']].groupby('tran_date').median())
df.set_index(pd.to_datetime(df.index), inplace=True)
df.plot(figsize=(15,10))
plt.title(i)
if split == True:
plt.show()
def plot_mean_timeseries(self, cluster_dfs, variable='sales', split=False):
import pandas as pd
import matplotlib.pyplot as plt
for i, j in cluster_dfs.items():
df = pd.DataFrame(j[[variable, 'tran_date']].groupby('tran_date').mean())
df.set_index(pd.to_datetime(df.index), inplace=True)
df.plot(figsize=(15,10))
plt.title(i)
if split == True:
plt.show()
def plot_rolling_mean_timeseries(self, cluster_dfs, variable='sales',
plot_second = False, period=7):
import pandas as pd
import matplotlib.pyplot as plt
for i, j in cluster_dfs.items():
f, ax = plt.subplots(figsize=(15,10))
df = j[[variable, 'tran_date']].groupby('tran_date').mean()\
.rolling(period).mean()
df.set_index(pd.to_datetime(df.index), inplace=True)
df.columns = ['Rolling {} level'.format(variable)]
df2 = j[[variable, 'tran_date']].groupby('tran_date').count()
df2.set_index(pd.to_datetime(df2.index), inplace=True)
df2.columns = ['Number of Products']
if plot_second == True:
df2.plot(kind='line', ax=ax)
df.plot(ax=ax, secondary_y=True, color='r')
plt.title(i)
plt.show()
def plot_double_rolling_mean(self, cluster_dfs, variable='sales',
plot_second = False, period=7):
import pandas as pd
import matplotlib.pyplot as plt
for i, j in cluster_dfs.items():
f, ax = plt.subplots(figsize=(15,10))
df = j[variable+ ['tran_date']].groupby('tran_date').mean()\
.rolling(period).mean()
df.set_index(pd.to_datetime(df.index), inplace=True)
df.columns = [variable]
if plot_second == True:
df[variable[0]].plot(kind='line', ax=ax)
df[variable[1]].plot(ax=ax, secondary_y=True, color='r')
plt.title(i)
plt.show()
def plot_nan_start(self, cluster_df):
        import pandas as pd
        import numpy as np  # np.isnan is used below
        import matplotlib.pyplot as plt
for i, j in cluster_df.items():
print('{}, there are {} skus and {} sales.'\
.format(i, len(j['sku_key'].unique()), sum(j['sales'])))
pivot_t = pd.pivot_table(j, index='sku_key',
columns='tran_date', values='sales')
pivot_t['nan'] = pivot_t.iloc[:,0].apply(np.isnan)
pivot_t['nan'].value_counts().plot(kind='bar')
plt.show()
def plot_all_timeseries(self, cluster_dfs):
import pandas as pd
import matplotlib.pyplot as plt
for i, j in cluster_dfs.items():
df = pd.pivot_table(j, values='sales', columns='tran_date',
index='sku_key').T
df.set_index(pd.to_datetime(df.index), inplace=True)
df.plot(figsize=(15,8))
plt.legend(bbox_to_anchor=(1.35, 1.1), ncol=6)
plt.show()
def plot_cluster_continuous(self, cluster_dfs, categories, colors, showplot=False):
import matplotlib.pyplot as plt
for j in categories:
f, ax = plt.subplots(figsize=(15,10))
for a, i in enumerate(cluster_dfs.keys()):
cluster_dfs[i][j].plot(ax=ax, kind='hist', bins=20, logy=True,
alpha=0.2, color=colors[a])
plt.title(j)
if j == 'sales':
plt.xlim(-50, 800)
elif j == 'selling_price':
plt.xlim(-100, 8000)
elif j == 'avg_discount':
plt.xlim(-1500, 2000)
if showplot == True:
plt.show()
else:
f.savefig('images/{}_continuous.png'.format(j))
def plot_cluster_continuous_box(self, cluster_dfs, categories, showplot=False):
import matplotlib.pyplot as plt
import pandas as pd
for j in categories:
f, ax = plt.subplots(figsize=(15,10))
for a, i in enumerate(cluster_dfs.keys()):
if a == 0:
int_df = pd.DataFrame(cluster_dfs[i][j])
int_df.columns = [i]
else:
temp = pd.DataFrame(cluster_dfs[i][j])
temp.columns = [i]
int_df = int_df.join(temp)
int_df.plot(ax=ax, kind='box', color='red', whis=[2.5, 97.5])
plt.title(j)
if showplot == True:
plt.show()
else:
f.savefig('images/{}_continuous-box.png'.format(j))
def plot_cluster_continuous_violin(self, cluster_dfs, categories, showplot=False):
import matplotlib.pyplot as plt
import pandas as pd
for j in categories:
f, ax = plt.subplots(figsize=(15,10))
for a, i in enumerate(cluster_dfs.keys()):
if a == 0:
int_df = pd.DataFrame(cluster_dfs[i][j])
int_df.columns = [i]
else:
temp = | pd.DataFrame(cluster_dfs[i][j]) | pandas.DataFrame |
# This file is part of the
# Garpar Project (https://github.com/quatrope/garpar).
# Copyright (c) 2021, 2022, <NAME>, <NAME> and QuatroPe
# License: MIT
# Full Text: https://github.com/quatrope/garpar/blob/master/LICENSE
# =============================================================================
# IMPORTS
# =============================================================================
from numpy import exp
from garpar.optimize import BlackLitterman, Markowitz, OptimizerABC
from garpar.optimize import mean_historical_return, sample_covariance
from garpar.core import Portfolio
import pandas as pd
import pandas.testing as pdt
import pytest
# =============================================================================
# TESTS WRAPPER FUNCTIONS
# =============================================================================
def test_mean_historical_return():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
entropy=0.5,
window_size=5,
)
result = mean_historical_return(pf)
expected = pd.Series({"stock0": 46.121466, "stock1": 287.122362})
expected.index.name = "Stocks"
pdt.assert_series_equal(result, expected)
def test_sample_covariance():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
entropy=0.5,
window_size=5,
)
result = sample_covariance(pf)
expected = pd.DataFrame(
data={
"stock0": [0.17805911, -0.13778805],
"stock1": [-0.13778805, 0.13090794],
},
index=["stock0", "stock1"],
)
expected.index.name = "Stocks"
expected.columns.name = "Stocks"
pdt.assert_frame_equal(result, expected)
# =============================================================================
# TESTS OPTIMIZER
# =============================================================================
def test_OptimizerABC_not_implementhed_methods():
class Foo(OptimizerABC):
def serialize(self, port):
return super().serialize(port)
def deserialize(self, port, weights):
return super().deserialize(port, weights)
def optimize(self, port):
return super().optimize(port)
opt = Foo()
with pytest.raises(NotImplementedError):
opt.serialize(0)
with pytest.raises(NotImplementedError):
opt.optimize(0)
with pytest.raises(NotImplementedError):
opt.deserialize(0, 0)
# =============================================================================
# TESTS MARKOWITZ
# =============================================================================
def test_Markowitz_is_OptimizerABC():
assert issubclass(Markowitz, OptimizerABC)
def test_Markowitz_defaults():
markowitz = Markowitz()
assert markowitz.weight_bounds == (0, 1)
assert markowitz.market_neutral is False
def test_Markowitz_serialize():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
entropy=0.5,
window_size=5,
)
# Instance
markowitz = Markowitz()
# Tested method
result = markowitz.serialize(pf)
# Expectations
expected_mu = pd.Series({"stock0": 46.121466, "stock1": 287.122362})
expected_mu.index.name = "Stocks"
expected_cov = pd.DataFrame(
data={
"stock0": [0.17805911, -0.13778805],
"stock1": [-0.13778805, 0.13090794],
},
index=["stock0", "stock1"],
)
expected_cov.index.name = "Stocks"
expected_cov.columns.name = "Stocks"
# Assert
assert isinstance(result, dict)
assert result.keys() == {"expected_returns", "cov_matrix", "weight_bounds"}
pdt.assert_series_equal(expected_mu, result["expected_returns"])
pdt.assert_frame_equal(expected_cov, result["cov_matrix"])
assert result["weight_bounds"] == (0, 1)
def test_Markowitz_deserialize():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
weights=[0.5, 0.5],
entropy=0.5,
window_size=5,
)
# Instance
markowitz = Markowitz()
# Tested method
weights = {"stock0": 0.45966836, "stock1": 0.54033164}
result = markowitz.deserialize(pf, weights)
# Expectations
expected_weights = pd.Series(
data={"stock0": 0.45966836, "stock1": 0.54033164}, name="Weights"
)
expected_weights.index.name = "Stocks"
# Assert everything is the same except for the weights
assert result is not pf
assert isinstance(result, Portfolio)
pdt.assert_frame_equal(pf._df, result._df)
assert isinstance(result.weights, pd.Series)
pdt.assert_series_equal(result.weights, expected_weights)
def test_Markowitz_optimize():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
weights=[0.5, 0.5],
entropy=0.5,
window_size=5,
)
# Instance
markowitz = Markowitz()
# Tested method
result = markowitz.optimize(pf, target_return=1.0)
# Expectations
expected_weights = pd.Series(
data={"stock0": 0.45966836, "stock1": 0.54033164}, name="Weights"
)
expected_weights.index.name = "Stocks"
# Assert everything is the same except for the weights
assert result is not pf
assert isinstance(result, Portfolio)
pdt.assert_frame_equal(pf._df, result._df)
assert isinstance(result.weights, pd.Series)
pdt.assert_series_equal(result.weights, expected_weights)
# =============================================================================
# TESTS BLACK LITTERMAN
# =============================================================================
def test_BlackLitterman_is_OptimizerABC():
assert issubclass(BlackLitterman, OptimizerABC)
def test_BlackLitterman_defaults():
bl = BlackLitterman()
assert bl.prior == "equal"
assert bl.absolute_views is None
assert bl.P is None
assert bl.Q is None
def test_BlackLitterman_serialize():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
entropy=0.5,
window_size=5,
)
# Instance
viewdict = {"stock0": 0.01, "stock1": 0.03}
bl = BlackLitterman(prior="algo", absolute_views=viewdict)
# Tested method
result = bl.serialize(pf)
# Expectations
expected_views = {"stock0": 0.01, "stock1": 0.03}
expected_cov = pd.DataFrame(
data={
"stock0": [0.17805911, -0.13778805],
"stock1": [-0.13778805, 0.13090794],
},
index=["stock0", "stock1"],
)
expected_cov.index.name = "Stocks"
expected_cov.columns.name = "Stocks"
# Assert
assert isinstance(result, dict)
assert result.keys() == {"pi", "absolute_views", "cov_matrix", "P", "Q"}
assert result["pi"] == "algo"
assert result["absolute_views"] == expected_views
pdt.assert_frame_equal(expected_cov, result["cov_matrix"])
assert result["P"] is None
assert result["Q"] is None
def test_BlackLitterman_deserialize():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
entropy=0.5,
window_size=5,
)
# Instance
viewdict = {"stock0": 0.01, "stock1": 0.03}
prior = pd.Series(data={"stock0": 0.02, "stock1": 0.04})
bl = BlackLitterman(prior=prior, absolute_views=viewdict)
# Tested method
weights = {"stock0": 0.45157882, "stock1": 0.54842117}
result = bl.deserialize(pf, weights)
# Expectations
expected_weights = pd.Series(
data={"stock0": 0.45157882, "stock1": 0.54842117}, name="Weights"
)
expected_weights.index.name = "Stocks"
# Assert everything is the same except for the weights
assert result is not pf
assert isinstance(result, Portfolio)
pdt.assert_frame_equal(pf._df, result._df)
assert isinstance(result.weights, pd.Series)
pdt.assert_series_equal(result.weights, expected_weights)
def test_BlackLitterman_optimize():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
entropy=0.5,
window_size=5,
)
# Instance
viewdict = {"stock0": 0.01, "stock1": 0.03}
prior = | pd.Series(data={"stock0": 0.02, "stock1": 0.04}) | pandas.Series |
from MP import MpFunctions
import requests
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.graph_objs as go
import datetime as dt
import numpy as np
import warnings
warnings.filterwarnings('ignore')
app = dash.Dash(__name__)
def get_ticksize(data, freq=30):
# data = dflive30
numlen = int(len(data) / 2)
# sample size for calculating ticksize = 50% of most recent data
tztail = data.tail(numlen).copy()
    tztail['tz'] = tztail.Close.rolling(freq).std()  # rolling std. dev of Close over `freq` periods
    tztail = tztail.dropna()
    ticksize = np.ceil(tztail['tz'].mean() * 0.25)  # ticksize = 1/4 of the mean rolling std. dev
if ticksize < 0.2:
ticksize = 0.2 # minimum ticksize limit
return int(ticksize)
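# Worked example (numbers invented): if the mean rolling std. dev of Close over
# the sampled window is ~110, then 0.25 * 110 = 27.5 and the returned tick size
# is ceil(27.5) = 28 -- the same order of magnitude as the manual
# `ticksz = 28` override mentioned further below.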
def get_data(url):
"""
:param url: binance url
:return: ohlcv dataframe
"""
response = requests.get(url)
data = response.json()
df = pd.DataFrame(data)
df = df.apply(pd.to_numeric)
df[0] = pd.to_datetime(df[0], unit='ms')
df = df[[0, 1, 2, 3, 4, 5]]
df.columns = ['datetime', 'Open', 'High', 'Low', 'Close', 'volume']
df = df.set_index('datetime', inplace=False, drop=False)
return df
url_30m = "https://www.binance.com/api/v1/klines?symbol=BTCBUSD&interval=30m" # 10 days history 30 min ohlcv
df = get_data(url_30m)
df.to_csv('btcusd30m.csv', index=False)
# params
context_days = len([group[1] for group in df.groupby(df.index.date)]) # Number of days used for context
freq = 2  # for 1-min bars use a 30-min frequency for each TPO; here we fetch 30-min bars from the server by default
avglen = context_days - 2 # num days to calculate average values
mode = 'tpo' # for volume --> 'vol'
trading_hr = 24 # Default for BTC USD or Forex
day_back = 0 # -1 While testing sometimes maybe you don't want current days data then use -1
# ticksz = 28  # Uncomment to set the tick size manually. A very small value produces cluttered TPO letters.
ticksz = (get_ticksize(df.copy(), freq=freq))*2 # Algorithm will calculate the optimal tick size based on volatility
textsize = 10
if day_back != 0:
symbol = 'Historical Mode'
else:
symbol = 'BTC-USD Live'
dfnflist = [group[1] for group in df.groupby(df.index.date)]
dates = []
for d in range(0, len(dfnflist)):
dates.append(dfnflist[d].index[0])
date_time_close = dt.datetime.today().strftime('%Y-%m-%d') + ' ' + '23:59:59'
append_dt = | pd.Timestamp(date_time_close) | pandas.Timestamp |
#!/usr/bin/env python
"""
Represent connectivity pattern using pandas DataFrame.
"""
from collections import OrderedDict
import itertools
import re
from future.utils import iteritems
from past.builtins import basestring
import networkx as nx
import numpy as np
import pandas as pd
from .plsel import Selector, SelectorMethods
from .pm import BasePortMapper
class Interface(object):
"""
Container for set of interface comprising ports.
This class contains information about a set of interfaces comprising
path-like identifiers and the attributes associated with them.
By default, each port must have at least the following attributes;
other attributes may be added:
- interface - indicates which interface a port is associated with.
- io - indicates whether the port receives input ('in') or
emits output ('out').
- type - indicates whether the port emits/receives spikes or
graded potentials.
All port identifiers in an interface must be unique. For two interfaces
to be deemed compatible, they must contain the same port identifiers and
their identifiers' 'io' attributes must be the inverse of each other
(i.e., every 'in' port in one interface must be mirrored by an 'out' port
    in the other interface).
Examples
--------
>>> i = Interface('/foo[0:4],/bar[0:3]')
>>> i['/foo[0:2]', 'interface', 'io', 'type'] = [0, 'in', 'spike']
>>> i['/foo[2:4]', 'interface', 'io', 'type'] = [1, 'out', 'spike']
Attributes
----------
data : pandas.DataFrame
Port attribute data.
index : pandas.MultiIndex
Index of port identifiers.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., 'foo[0:2]') or sequence of token
sequences (e.g., [['foo', (0, 2)]]) describing the port
identifiers comprised by the interface.
columns : list, default = ['interface', 'io', 'type']
Data column names.
See Also
--------
plsel.SelectorMethods
"""
def __init__(self, selector='', columns=['interface', 'io', 'type']):
# All ports in an interface must contain at least the following
# attributes:
assert set(columns).issuperset(['interface', 'io', 'type'])
self.sel = SelectorMethods()
assert not(self.sel.is_ambiguous(selector))
self.num_levels = self.sel.max_levels(selector)
names = [i for i in range(self.num_levels)]
idx = self.sel.make_index(selector, names)
self.__validate_index__(idx)
self.data = pd.DataFrame(index=idx, columns=columns, dtype=object)
# Dictionary containing mappers for different port types:
self.pm = {}
def __validate_index__(self, idx):
"""
Raise an exception if the specified index will result in an invalid interface.
"""
if idx.duplicated().any():
raise ValueError('Duplicate interface index entries detected.')
def __getitem__(self, key):
if type(key) == tuple and len(key) > 1:
return self.sel.select(self.data[list(key[1:])], key[0])
else:
return self.sel.select(self.data, key)
def __setitem__ambiguous__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Ensure that the specified selector can actually be used against the
# Interface's internal DataFrame:
try:
idx = self.sel.get_index(self.data, selector,
names=self.data.index.names)
except ValueError:
raise ValueError('cannot create index with '
'selector %s and column names %s' \
% (selector, str(self.data.index.names)))
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
for k, v in iteritems(data):
self.data[k].loc[idx] = v
def __setitem__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Fall back to slower method if the selector is ambiguous:
if self.sel.is_ambiguous(selector):
self.__setitem__ambiguous__(key, value)
return
else:
selector = Selector(selector)
# Don't waste time trying to do anything if the selector is empty:
if not selector.nonempty:
return
# If the number of specified identifiers doesn't exceed the size of the
# data array, enlargement by specifying identifiers that are not in
# the index will not occur:
assert len(selector) <= len(self.data)
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
if selector.max_levels == 1:
s = [i for i in itertools.chain(*selector.expanded)]
else:
s = self.sel.pad_selector(selector.expanded,
len(self.index.levshape))
for k, v in iteritems(data):
self.data[k].loc[s] = v
@property
def index(self):
"""
Interface index.
"""
return self.data.index
@index.setter
def index(self, i):
self.data.index = i
@property
def interface_ids(self):
"""
Interface identifiers.
"""
return set(self.data['interface'])
@property
def io_inv(self):
"""
Returns new Interface instance with inverse input-output attributes.
Returns
-------
i : Interface
Interface instance whose 'io' attributes are the inverse of those of
the current instance.
"""
data_inv = self.data.copy()
f = lambda x: 'out' if x == 'in' else \
('in' if x == 'out' else x)
data_inv['io'] = data_inv['io'].apply(f)
return self.from_df(data_inv)
@property
def idx_levels(self):
"""
Number of levels in Interface index.
"""
if isinstance(self.data.index, pd.MultiIndex):
return len(self.index.levels)
else:
return 1
def clear(self):
"""
Clear all ports in class instance.
"""
self.data.drop(self.data.index, inplace=True)
def data_select(self, f, inplace=False):
"""
Restrict Interface data with a selection function.
Returns an Interface instance containing only those rows
whose data is passed by the specified selection function.
Parameters
----------
f : function
Selection function with a single dict argument whose keys
are the Interface's data column names.
inplace : bool, default=False
If True, update and return the given Interface instance.
Otherwise, return a new instance.
Returns
-------
i : Interface
Interface instance containing data selected by `f`.
"""
assert callable(f)
result = self.data[f(self.data)]
if inplace:
self.data = result
return self
else:
return Interface.from_df(result)
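# Example sketch (hypothetical usage): keep only the rows whose 'io' attribute
# is 'in', returning a new Interface instance:
#
#     i_in = i.data_select(lambda df: df['io'] == 'in')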
@classmethod
def from_df(cls, df):
"""
Create an Interface from a properly formatted DataFrame.
Examples
--------
>>> import plsel, pattern
>>> import pandas
>>> idx = plsel.SelectorMethods.make_index('/foo[0:2]')
>>> data = [[0, 'in', 'spike'], [1, 'out', 'gpot']]
>>> columns = ['interface', 'io', 'type']
>>> df = pandas.DataFrame(data, index=idx, columns=columns)
>>> i = pattern.Interface.from_df(df)
Parameters
----------
df : pandas.DataFrame
DataFrame with a MultiIndex and data columns 'interface',
'io', and 'type' (additional columns may also be present).
Returns
-------
i : Interface
Generated Interface instance.
Notes
-----
The contents of the specified DataFrame instance are copied into the
new Interface instance.
"""
assert set(df.columns).issuperset(['interface', 'io', 'type'])
if isinstance(df.index, pd.MultiIndex):
if len(df.index):
i = cls(df.index.tolist(), df.columns)
else:
i = cls([()], df.columns)
elif isinstance(df.index, pd.Index):
if len(df.index):
i = cls([(s,) for s in df.index.tolist()], df.columns)
else:
i = cls([()], df.columns)
else:
raise ValueError('invalid index type')
i.data = df.copy()
i.__validate_index__(i.index)
return i
@classmethod
def from_csv(cls, file_name, **kwargs):
"""
Create an Interface from a properly formatted CSV file.
Parameters
----------
file_name : str
File name of CSV file containing interface data.
kwargs : dict
Options to pass to `DataFrame.from_csv()`
Returns
-------
i : Interface
Generated Interface instance.
"""
df = pd.DataFrame.from_csv(file_name, **kwargs)
return cls.from_df(df)
@classmethod
def from_dict(cls, d):
"""
Create an Interface from a dictionary of selectors and data values.
Examples
--------
>>> d = {'/foo[0]': [0, 'in', 'gpot'], '/foo[1]': [1, 'in', 'gpot']}
>>> i = Interface.from_dict(d)
Parameters
----------
d : dict
Dictionary that maps selectors to the data that should be associated
with the corresponding ports. If a scalar, the data is assigned to
the first attribute; if an iterable, the data is assigned to the
attributes in order.
Returns
-------
i : Interface
Generated interface instance.
"""
i = cls(','.join(d.keys()))
for k, v in iteritems(d):
i[k] = v
i.data.sort_index(inplace=True)
return i
@classmethod
def from_graph(cls, g):
"""
Create an Interface from a NetworkX graph.
Examples
--------
>>> import networkx as nx
>>> g = nx.Graph()
>>> g.add_node('/foo[0]', interface=0, io='in', type='gpot')
>>> g.add_node('/foo[1]', interface=0, io='in', type='gpot')
>>> i = Interface.from_graph(g)
Parameters
----------
g : networkx.Graph
Graph whose node IDs are path-like port identifiers. The node attributes
are assigned to the ports.
Returns
-------
i : Interface
Generated interface instance.
"""
assert isinstance(g, nx.Graph)
return cls.from_dict(g.node)
@classmethod
def from_selectors(cls, sel, sel_in='', sel_out='',
sel_spike='', sel_gpot='', *sel_int_list):
"""
Create an Interface instance from selectors.
Parameters
----------
sel : str, unicode, or sequence
Selector describing all ports comprised by interface.
sel_in : str, unicode, or sequence
Selector describing the interface's input ports.
sel_out : str, unicode, or sequence
Selector describing the interface's output ports.
sel_spike : str, unicode, or sequence
Selector describing the interface's spiking ports.
sel_gpot : str, unicode, or sequence
Selector describing the interface's graded potential ports.
sel_int_list : list of str, unicode, or sequence
Selectors consecutively describing the ports associated with interface 0,
interface 1, etc.
Returns
-------
i : Interface
Generated interface instance.
"""
i = cls(sel)
i[sel_in, 'io'] = 'in'
i[sel_out, 'io'] = 'out'
i[sel_spike, 'type'] = 'spike'
i[sel_gpot, 'type'] = 'gpot'
for n, sel_int in enumerate(sel_int_list):
i[sel_int, 'interface'] = n
return i
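# Example sketch (hypothetical selectors): a two-port interface with one input
# spike port and one output graded-potential port, both assigned to interface 0:
#
#     i = Interface.from_selectors('/foo[0:2]', '/foo[0]', '/foo[1]',
#                                  '/foo[0]', '/foo[1]', '/foo[0:2]')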
def gpot_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to graded potential ports.
Parameters
----------
i : int
Interface identifier. If None, return all graded potential ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all graded potential ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['type'] == 'gpot']
except:
df = None
else:
try:
df = self.data[(self.data['type'] == 'gpot') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def in_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to input ports.
Parameters
----------
i : int
Interface identifier. If None, return all input ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all input ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['io'] == 'in']
except:
df = None
else:
try:
df = self.data[(self.data['io'] == 'in') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def interface_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to specific interface.
Parameters
----------
i : int
Interface identifier. If None, return all ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
if tuples:
return self.index.tolist()
else:
return self.copy()
else:
try:
df = self.data[self.data['interface'] == i]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def _merge_on_interfaces(self, a, i, b):
"""
Merge contents of this and another Interface instance.
Notes
-----
If the number of levels in one Interface instance's DataFrame index is
greater than that of the other, the number of levels in the index of the
merged DataFrames instances is set to the former and the index with the
smaller number is padded with blank entries to enable pandas' merge
mechanism to function properly.
"""
assert isinstance(i, Interface)
df_left = self.data[self.data['interface'] == a]
df_right = i.data[i.data['interface'] == b]
n_left_names = len(self.data.index.names)
n_right_names = len(i.data.index.names)
# Pandas' merge mechanism fails if the number of levels in each of the
# merged MultiIndex indices differs and there is overlap of more than
# one level; we therefore pad the index with the smaller number of
# levels before attempting the merge:
if n_left_names > n_right_names:
for n in range(i.num_levels, i.num_levels+(n_left_names-n_right_names)):
new_col = str(n)
df_right[new_col] = ''
df_right.set_index(new_col, append=True, inplace=True)
elif n_left_names < n_right_names:
for n in range(self.num_levels, self.num_levels+(n_right_names-n_left_names)):
new_col = str(n)
df_left[new_col] = ''
df_left.set_index(new_col, append=True, inplace=True)
return pd.merge(df_left, df_right,
left_index=True,
right_index=True)
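# Example sketch (hypothetical indices): if one side has a two-level index
# such as ('/foo', 0) and the other a single-level index such as '/bar', the
# single-level side gains an extra '' level (e.g. ('/bar', '')) so that both
# frames have the same number of index levels before pd.merge is applied.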
def get_common_ports(self, a, i, b, t=None):
"""
Get port identifiers common to this and another Interface instance.
Parameters
----------
a : int
Identifier of interface in the current instance.
i : Interface
Interface instance containing the other interface.
b : int
Identifier of interface in instance `i`.
t : str or unicode
If not None, restrict output to those identifiers with the specified
port type.
Returns
-------
result : list of tuple
Expanded port identifiers shared by the two specified Interface
instances.
Notes
-----
The number of levels of the returned port identifiers is equal to the
maximum number of levels of this Interface instance.
The order of the returned port identifiers is not guaranteed.
"""
if t is None:
x = self.data[self.data['interface'] == a]
y = i.data[i.data['interface'] == b]
else:
x = self.data[(self.data['interface'] == a) & (self.data['type'] == t)]
y = i.data[(i.data['interface'] == b) & (i.data['type'] == t)]
if isinstance(x.index, pd.MultiIndex):
x_list = [tuple(a for a in b if a != '') \
for b in x.index]
else:
x_list = [(a,) for a in x.index]
if isinstance(y.index, pd.MultiIndex):
y_list = [tuple(a for a in b if a != '') \
for b in y.index]
else:
y_list = [(a,) for a in y.index]
return list(set(x_list).intersection(y_list))
def is_compatible(self, a, i, b, allow_subsets=False):
"""
Check whether two interfaces can be connected.
Compares an interface in the current Interface instance with one in
another instance to determine whether their ports can be connected.
Parameters
----------
a : int
Identifier of interface in the current instance.
i : Interface
Interface instance containing the other interface.
b : int
Identifier of interface in instance `i`.
allow_subsets : bool
If True, interfaces that contain a compatible subset of ports are
deemed to be compatible; otherwise, all ports in the two interfaces
must be compatible.
Returns
-------
result : bool
True if both interfaces comprise the same identifiers, the set 'type'
attributes for each matching pair of identifiers in the two
interfaces match, and each identifier with an 'io' attribute set
to 'out' in one interface has its 'io' attribute set to 'in' in the
other interface.
Notes
-----
Assumes that the port identifiers in both interfaces are sorted in the
same order.
"""
# Merge the interface data on their indices (i.e., their port identifiers):
data_merged = self._merge_on_interfaces(a, i, b)
# Check whether there are compatible subsets, i.e., at least one pair of
# ports from the two interfaces that are compatible with each other:
if allow_subsets:
# If the interfaces share no identical port identifiers, they are
# incompatible:
if not len(data_merged):
return False
# Compatible identifiers must have the same non-null 'type'
# attribute and their non-null 'io' attributes must be the inverse
# of each other:
if not data_merged.apply(lambda row: \
((row['type_x'] == row['type_y']) or \
(pd.isnull(row['type_x']) and pd.isnull(row['type_y']))) and \
((row['io_x'] == 'out' and row['io_y'] == 'in') or \
(row['io_x'] == 'in' and row['io_y'] == 'out') or \
(pd.isnull(row['io_x'])
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
"""
Item #01: Análise monovariada global dos preditores
- plotar histogramas
- calcular média, desvio padrão e assimetria
"""
# setando estilo e outras configs
seaborn.set()
# paths e arquivos
dataset = "datasets/glass.dat"
figpath = "figures/item1/"
result_file = "results/item1.dat"
# carregando dados e limpando coluna id (dencessaria)
df = pd.read_csv(dataset)
df = df.drop(["id"], axis=1)
# lista de preditores (exclui-se a coluna da classe)
predictors = list(df.drop(["class"], axis=1))
# analise monovariada
# - histograma
# - media
# - variancia
# - assimetria
columns = ["predictor", "mean", "std", "var", "skewness"]
monovariate = pd.DataFrame(columns=columns)
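# Illustrative sketch (not part of the original snippet): one way the
# univariate loop could continue. Figure filenames and the CSV output
# format below are assumptions.
for p in predictors:
    # summary statistics for this predictor
    monovariate.loc[len(monovariate)] = [p, df[p].mean(), df[p].std(),
                                         df[p].var(), df[p].skew()]
    # histogram of the predictor, saved under figpath
    df[p].plot.hist(bins=20, title=p)
    plt.savefig(figpath + p + ".png")
    plt.close()
monovariate.to_csv(result_file, index=False)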
"""
Scripts used to analyse data in the human_data directory and produce the
data files used for plotting.
"""
import argparse
import os.path
import json
import collections
import numpy as np
import pandas as pd
import sys
import tskit
import tqdm
data_prefix = "human-data"
def print_sample_edge_stats(ts):
"""
Print out some basic stats about the sample edges in the specified tree
sequence.
"""
tables = ts.tables
child_counts = np.bincount(tables.edges.child)
samples = ts.samples()
all_counts = child_counts[samples]
print("mean sample count = ", np.mean(all_counts))
index = tables.nodes.flags[tables.edges.child] == tskit.NODE_IS_SAMPLE
length = tables.edges.right[index]- tables.edges.left[index]
print("mean length = ", np.mean(length))
print("median length = ", np.median(length))
n50 = np.zeros(ts.num_samples)
edges = tables.edges
child = edges.child
left = edges.left
right = edges.right
for j, sample in enumerate(samples):
index = child == sample
length = right[index]- left[index]
length = np.sort(length)[::-1]
cumulative = np.cumsum(length)
# N50 is the first length such that the cumulative sum up to that point
# is >= L / 2.
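# Worked example (made-up numbers): sorted lengths [6, 3, 1] on a length-10
# sequence give cumulative sums [6, 9, 10]; the first entry whose cumulative
# sum is >= 5 is 6, so N50 = 6.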
n50[j] = length[cumulative >= ts.sequence_length / 2][0]
print("Average N50 = ", np.mean(n50))
def get_sgdp_sample_edges():
filename = os.path.join(data_prefix, "sgdp_chr20.nosimplify.trees")
ts = tskit.load(filename)
print("SGDP")
print_sample_edge_stats(ts)
population_name = []
population_region = []
for pop in ts.populations():
md = json.loads(pop.metadata.decode())
population_name.append(md["name"])
population_region.append(md["region"])
tables = ts.tables
child_counts = np.bincount(tables.edges.child)
datasets = []
samples = []
strands = []
populations = []
regions = []
sample_edges = []
for ind in ts.individuals():
md = json.loads(ind.metadata.decode())
for j, node_id in enumerate(ind.nodes):
node = ts.node(node_id)
samples.append(md["sgdp_id"])
strands.append(j)
populations.append(population_name[node.population])
regions.append(population_region[node.population])
sample_edges.append(child_counts[node_id])
datasets.append("sgdp")
df = pd.DataFrame({
"dataset": datasets,
"sample": samples,
"strand": strands,
"population": populations,
"region": regions,
"sample_edges": sample_edges})
return df
def get_1kg_sample_edges():
filename = os.path.join(data_prefix, "1kg_chr20.nosimplify.trees")
ts = tskit.load(filename)
print("TGP")
print_sample_edge_stats(ts)
population_name = []
population_region = []
for pop in ts.populations():
md = json.loads(pop.metadata.decode())
population_name.append(md["name"])
population_region.append(md["super_population"])
tables = ts.tables
child_counts = np.bincount(tables.edges.child)
datasets = []
samples = []
strands = []
populations = []
regions = []
sample_edges = []
for ind in ts.individuals():
md = json.loads(ind.metadata.decode())
for j, node_id in enumerate(ind.nodes):
node = ts.node(node_id)
samples.append(md["individual_id"])
strands.append(j)
populations.append(population_name[node.population])
regions.append(population_region[node.population])
sample_edges.append(child_counts[node_id])
datasets.append("1kg")
df = pd.DataFrame({
"dataset": datasets,
"sample": samples,
"strand": strands,
"population": populations,
"region": regions,
"sample_edges": sample_edges})
return df
def process_hg01933_local_gnn():
filename = os.path.join(data_prefix, "1kg_chr20.snipped.trees")
ts = tskit.load(filename)
region_sample_set_map = collections.defaultdict(list)
for population in ts.populations():
md = json.loads(population.metadata.decode())
region = md["super_population"]
region_sample_set_map[region].extend(list(ts.samples(
population=population.id)))
regions = list(region_sample_set_map.keys())
region_sample_sets = [region_sample_set_map[k] for k in regions]
def local_gnn(ts, focal, reference_sets):
reference_set_map = np.zeros(ts.num_nodes, dtype=int) - 1
for k, reference_set in enumerate(reference_sets):
for u in reference_set:
if reference_set_map[u] != -1:
raise ValueError("Duplicate value in reference sets")
reference_set_map[u] = k
K = len(reference_sets)
A = np.zeros((len(focal), ts.num_trees, K))
lefts = np.zeros(ts.num_trees, dtype=float)
rights = np.zeros(ts.num_trees, dtype=float)
parent = np.zeros(ts.num_nodes, dtype=int) - 1
sample_count = np.zeros((ts.num_nodes, K), dtype=int)
# Set the intitial conditions.
for j in range(K):
sample_count[reference_sets[j], j] = 1
for t, ((left, right),edges_out, edges_in) in enumerate(ts.edge_diffs()):
for edge in edges_out:
parent[edge.child] = -1
v = edge.parent
while v != -1:
sample_count[v] -= sample_count[edge.child]
v = parent[v]
for edge in edges_in:
parent[edge.child] = edge.parent
v = edge.parent
while v != -1:
sample_count[v] += sample_count[edge.child]
v = parent[v]
# Process this tree.
for j, u in enumerate(focal):
focal_reference_set = reference_set_map[u]
p = parent[u]
lefts[t] = left
rights[t] = right
while p != tskit.NULL:
total = np.sum(sample_count[p])
if total > 1:
break
p = parent[p]
if p != tskit.NULL:
scale = 1 / (total - int(focal_reference_set != -1))
for k, reference_set in enumerate(reference_sets):
n = sample_count[p, k] - int(focal_reference_set == k)
A[j, t, k] = n * scale
return (A, lefts, rights)
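# Note on local_gnn (descriptive comment added for clarity): A has shape
# (len(focal), num_trees, len(reference_sets)); entry [j, t, k] is the fraction
# of the tracked samples below the nearest ancestor of focal[j] that has more
# than one tracked descendant (excluding focal[j] itself) which belong to
# reference set k in tree t. lefts/rights give each tree's genomic interval.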
for ind in ts.individuals():
md = json.loads(ind.metadata.decode())
if md["individual_id"] == "HG01933":
for j, node in enumerate(ind.nodes):
A, left, right = local_gnn(ts, [node], region_sample_sets)
df = pd.DataFrame(data=A[0], columns=regions)
df["left"] = left
df["right"] = right
# Remove rows with no difference in GNN to next row
keep_rows = ~(df.iloc[:, 0:5].diff(axis=0) == 0).all(axis=1)
df = df[keep_rows]
df.to_csv("data/HG01933_local_gnn_{}.csv".format(j))
def process_sample_edges():
"""
Processes data from the SGDP and 1KG data files to produce a data frame
containing the number of sample edges for every sample.
"""
df_sgdp = get_sgdp_sample_edges()
df_1kg = get_1kg_sample_edges()
df_all = pd.concat([df_sgdp, df_1kg], ignore_index=True)
datafile = "data/sample_edges.csv"
df_all.to_csv(datafile)
def process_sample_edge_outliers():
"""
Runs the analysis for finding the sample edge outliers.
"""
filename = os.path.join(data_prefix, "1kg_chr20.nosimplify.trees")
ts = tskit.load(filename)
# construct the dictionary mapping individual names to their metadata
tables = ts.tables
individual_name_map = {}
for individual in ts.individuals():
metadata = json.loads(individual.metadata.decode())
name = metadata["individual_id"]
individual_name_map[name] = individual
# construct a dictionary linking individual's names to their number of
# breakpoints within 100bp of each other
close_breakpoints = dict()
child = tables.edges.child
left = tables.edges.left
for key, individual in tqdm.tqdm(individual_name_map.items()):
index_0 = child == individual.nodes[0]
left_0 = left[index_0]
index_1 = child == individual.nodes[1]
left_1 = left[index_1]
close_100 = 0
for breakpoint in left_0:
close_100 += len(left_1[(left_1 >= breakpoint - 100) & (left_1 <= breakpoint + 100)])
close_breakpoints[key] = close_100
print("Average = ", np.mean(list(close_breakpoints.values())))
for ind in ["NA20289", "HG02789"]:
print(ind, ":", close_breakpoints[ind])
def process_1kg_ukbb_gnn():
source_file = os.path.join(data_prefix, "1kg_ukbb_chr20.snipped.trees.gnn.csv")
df = pd.read_csv(source_file)
# Use TGP populations here to make sure we don't leak any metadata.
tgp_populations = [
'CHB', 'JPT', 'CHS', 'CDX', 'KHV',
'CEU', 'TSI', 'FIN', 'GBR', 'IBS',
'YRI', 'LWK', 'GWD', 'MSL', 'ESN', 'ASW', 'ACB',
'MXL', 'PUR', 'CLM', 'PEL',
'GIH', 'PJL', 'BEB', 'STU', 'ITU']
# Overall GNN by 1KG population
dfg = df.groupby(df.ethnicity).mean()
dfg = dfg[tgp_populations]
datafile = "data/1kg_ukbb_ethnicity.csv"
dfg.to_csv(datafile)
# Subset down to the british ethnicity.
df = df[df.ethnicity == "British"]
print("British subset = ", len(df))
dfg = df.groupby(df.centre).mean()
dfg = dfg[tgp_populations]
datafile = "data/1kg_ukbb_british_centre.csv"
dfg.to_csv(datafile)
def process_ukbb_ukbb_gnn():
source_file = os.path.join(data_prefix, "ukbb_chr20.augmented_131072.snipped.trees.gnn.csv")
df = pd.read_csv(source_file)
import logging
from typing import NamedTuple, Dict, List, Set, Union
import d3m
import d3m.metadata.base as mbase
import numpy as np
import pandas as pd
from common_primitives import utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams as metadata_hyperparams
from d3m.metadata import hyperparams, params
from d3m.metadata.hyperparams import Enumeration, UniformInt, UniformBool
from d3m.primitive_interfaces.base import CallResult
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from . import config
_logger = logging.getLogger(__name__)
Input = d3m.container.DataFrame
Output = d3m.container.DataFrame
class EncParams(params.Params):
mapping: Dict
cat_columns: List[str]
empty_columns: List[int]
class EncHyperparameter(hyperparams.Hyperparams):
n_limit = UniformInt(lower=5, upper=100, default=12,
description='Limits the maximum number of columns generated from a single categorical column',
semantic_types=['http://schema.org/Integer',
'https://metadata.datadrivendiscovery.org/types/TuningParameter'])
use_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
)
exclude_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
)
return_result = hyperparams.Enumeration(
values=['append', 'replace', 'new'],
default='replace',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
)
use_semantic_types = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe"
)
add_index_columns = hyperparams.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
)
class Encoder(UnsupervisedLearnerPrimitiveBase[Input, Output, EncParams, EncHyperparameter]):
"""
A one-hot encoder, which
1. n_limit: maximum number of distinct values to one-hot encode;
remaining, less frequent values are put in the [colname]_other_ column.
2. feed in data via set_training_data, then call fit() to tune the encoder.
3. produce(): the input data is encoded and returned.
"""
metadata = hyperparams.base.PrimitiveMetadata({
"id": "18f0bb42-6350-3753-8f2d-d1c3da70f279",
"version": config.VERSION,
"name": "ISI DSBox Data Encoder",
"description": "Encode data, such as one-hot encoding for categorical data",
"python_path": "d3m.primitives.data_preprocessing.Encoder.DSBOX",
"primitive_family": "DATA_PREPROCESSING",
"algorithm_types": ["ENCODE_ONE_HOT"],
"source": {
"name": config.D3M_PERFORMER_TEAM,
"contact": config.D3M_CONTACT,
"uris": [config.REPOSITORY]
},
"keywords": ["preprocessing", "encoding"],
"installation": [config.INSTALLATION],
})
def __repr__(self):
return "%s(%r)" % ('Encoder', self.__dict__)
def __init__(self, *, hyperparams: EncHyperparameter) -> None:
super().__init__(hyperparams=hyperparams)
self.hyperparams = hyperparams
self._mapping: Dict = {}
self._input_data: Input = None
self._input_data_copy = None
self._fitted = False
self._cat_columns = []
self._col_index = None
self._empty_columns = []
def set_training_data(self, *, inputs: Input) -> None:
self._input_data = inputs
self._fitted = False
def _trim_features(self, feature, n_limit):
topn = feature.dropna().unique()
if n_limit:
if feature.dropna().nunique() > n_limit:
topn = list(feature.value_counts().head(n_limit).index)
topn.append('other_')
topn = [x for x in topn if x]
return feature.name, topn
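# Example sketch (hypothetical column): for a 'color' column with 20 distinct
# values and n_limit=12, topn holds the 12 most frequent values plus the
# sentinel 'other_'; the remaining values are later folded into a single
# '<colname>_other_' one-hot column.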
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
if self._fitted:
return
if self._input_data is None:
raise ValueError('Missing training(fitting) data.')
# Look at attribute columns only
# print('fit in', self._input_data.columns)
data = self._input_data.copy()
all_attributes = utils.list_columns_with_semantic_types(metadata=data.metadata, semantic_types=[
"https://metadata.datadrivendiscovery.org/types/Attribute"])
# Remove columns with all empty values, structural type str
numeric = utils.list_columns_with_semantic_types(
data.metadata, ['http://schema.org/Integer', 'http://schema.org/Float'])
numeric = [x for x in numeric if x in all_attributes]
self._empty_columns = []
_logger.debug(f'Numeric columns: {numeric}')
for element in numeric:
if data.metadata.query((mbase.ALL_ELEMENTS, element)).get('structural_type', ()) == str:
if pd.isnull(pd.to_numeric(data.iloc[:, element], errors='coerce')).sum() == data.shape[0]:
_logger.debug(f'Empty numeric str column: {element}')
self._empty_columns.append(element)
# Remove columns with all empty values, structural numeric
is_empty = pd.isnull(data)
# BUG: DatetimeIndex has become unhashable in 1.3.1? #42844
import random
import pandas as pd
print(pd.__version__)
# Right data
ts_open = pd.DatetimeIndex(
[
pd.Timestamp("2021/01/01 00:37"),
pd.Timestamp("2021/01/01 00:40"),
pd.Timestamp("2021/01/01 01:00"),
pd.Timestamp("2021/01/01 03:45"),
pd.Timestamp("2021/01/01 03:59"),
pd.Timestamp("2021/01/01 05:20"),
]
)
length = len(ts_open)
random.seed(1)
volume = random.sample(range(1, length + 1), length)
df_smpl = pd.DataFrame({"volume": volume, "ts_open": ts_open})
# Left data
ts_full = pd.date_range(start=pd.Timestamp("2021/01/01 00:00")
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
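# Usage sketch (mirrors the calls made in TestDataFrameAnalytics below):
#
#     assert_stat_op_calc('sum', np.sum, float_frame_with_na,
#                         skipna_alternative=np.nansum)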
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = frame.cov(min_periods=len(float_frame) - 8)
expected = frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
        # The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
        df = DataFrame(index=lrange(10))  # target API: pandas.compat.lrange
from itertools import groupby
from sklearn.model_selection import train_test_split
from all_stand_var import conv_dict, vent_cols3
from all_own_funct import extub_group, memory_downscale, age_calc_bron
import all_own_funct as func
import os
from all_stand_var import all_cols
import pandas as pd
import numpy as np
import locale
import datetime as dt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from collections import Counter
import pickle
"""
File which transforms the raw input from sql server to useable dataframe
Data path= folder in which the raw data in the csv file is stored
label_data = file in which the label data is stored
output_folder = Folder in which the transformed dataframe should be stored
"""
Data_path = r'data\CHD_V4.csv'
label_data = r'Results_CHD\admissiondate_CHD0_dia.csv'
locale.setlocale(locale.LC_ALL, 'fr_FR')
output_folder = os.path.join(os.getcwd(), 'Results_bron_EDA_CHD')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
class data_preprocessing():
def __init__():
pass
@staticmethod
def value_filtering(df):
"""
Function filters out empty rows, replaces null with NaN and replaces inf with NaN
Drops all rows in which the respiratory rate and expiratory tidal volume is NaN
"""
# df.dropna(axis=1,how='all',inplace=True)
df.dropna(axis=0, how='all', inplace=True)
df.replace(to_replace='NULL', value=np.nan, inplace=True)
df.replace(to_replace='nan', value=np.nan, inplace=True)
df.replace(to_replace=[np.inf, -np.inf], value=np.nan, inplace=True)
df.sort_values('pat_hosp_id', inplace=True)
df.sort_values('pat_datetime', inplace=True)
df = df.dropna(how='all', subset=['vent_m_rr', 'vent_m_tv_exp'])
return df
@staticmethod
def Age_calc_cat(df):
"""
Calculate the age and weight for every admission
"""
df['pat_datetime_temp'] = pd.to_datetime(df['pat_datetime']).dt.date
df['pat_bd'] = pd.to_datetime(df['pat_bd']).dt.date
df['Age'] = (df['pat_datetime_temp'] -
df['pat_bd']).dt.days.divide(365)
df = df.drop('pat_datetime_temp', axis=1)
df = df.drop('pat_bd', axis=1)
df['Age'] = df['Age'].astype('float64')
df['Age'] = np.where((df['Age'] > 25), 1, df['Age'])
print(df['Age'].describe())
df = df.groupby(['pat_hosp_id', 'OK_datum'], sort=False,
as_index=False).apply(age_calc_bron)
df['pat_weight_act'] = df['pat_weight_act'].astype('float64')
return df
@ staticmethod
def scale_dataframe(df, scaler):
"""
Function which removes physiological impossible values and scales all numerical features with the use of z-score normalisation
"""
float_columns = list(df.select_dtypes(
include=['float64']).columns)
to_remove = ['pat_hosp_id', 'Reintub', 'Detub_fail']
float_columns = list(
(Counter(float_columns)-Counter(to_remove)).elements())
for column in float_columns:
df.loc[df[column] < 0, column] = 0
df[column] = df[column].astype('float32')
df['mon_rr'].mask(df['mon_rr'] > 100, 100, inplace=True)
df['vent_m_tv_exp'].mask(df['vent_m_tv_exp'] >
750, 750, inplace=True)
        df['mon_hr'] = pd.to_numeric(df['mon_hr'], errors='coerce')  # target API: pandas.to_numeric
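# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the docstring of
# scale_dataframe above mentions z-score normalisation of the numerical
# features, while the visible code only clips implausible values. Assuming the
# missing tail of the method uses the StandardScaler imported at the top, the
# scaling step could look roughly like the standalone helper below; the helper
# name and the exact column selection are assumptions.
def _zscore_scale_sketch(df):
    float_columns = df.select_dtypes(include=['float32', 'float64']).columns
    scaler = StandardScaler()
    df = df.copy()
    df[float_columns] = scaler.fit_transform(df[float_columns])  # z-score per column
    return df, scaler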
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import argparse
def _save_split(annotation, patients, labels, out_path):
os.makedirs(os.path.dirname(out_path), exist_ok=True)
annotation = annotation[annotation['Patient ID'].isin(patients)]
labels = set(labels)
annotation = annotation[annotation.apply(lambda row: set(row['Finding Labels'].split('|')) == labels, axis=1)]
with open(out_path, 'w') as f_out:
f_out.writelines([filename + '\n' for filename in annotation['Image Index'].values])
def create_folds(path_to_data_entry, path_to_train_val_list, output_root, n_folds):
folds_dir = os.path.join(output_root, 'folds', 'nih')
validation_classes_root = os.path.join(output_root, 'validation_classes')
validation_classes_path = os.path.join(validation_classes_root, 'nih.csv')
os.makedirs(validation_classes_root, exist_ok=True)
os.makedirs(folds_dir, exist_ok=True)
VIEWS = ["AP", "PA"]
VALID_LABEL = 'Infiltration'
"========================== GENERATE CLASSES FOR VALIDATION ========================="
if not os.path.exists(validation_classes_path):
df = pd.DataFrame([[VALID_LABEL]], columns=['Valid Labels'])
df.to_csv(validation_classes_path, index=False)
"===================== CREATE K-FOLD CROSS-VALIDATION SPLIT ========================"
valid_labels_df = pd.read_csv(validation_classes_path)
valid_anomaly_labels = valid_labels_df['Valid Labels'].values
with open(path_to_train_val_list) as fin:
image_names = list(map(lambda x: x.strip(), fin.readlines()))
    annotation = pd.read_csv(path_to_data_entry)  # target API: pandas.read_csv
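# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a plausible continuation
# of create_folds is a patient-level KFold split, so that images from the same
# patient never appear in both a train and a validation fold. The fold file
# naming and the label choices passed to _save_split are assumptions.
def _patient_kfold_sketch(annotation, n_folds, folds_dir, valid_label='Infiltration'):
    patients = np.unique(annotation['Patient ID'].values)
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=42)
    for fold_idx, (train_idx, val_idx) in enumerate(kf.split(patients)):
        _save_split(annotation, patients[train_idx], ['No Finding'],
                    os.path.join(folds_dir, 'fold_{}_train.txt'.format(fold_idx)))
        _save_split(annotation, patients[val_idx], [valid_label],
                    os.path.join(folds_dir, 'fold_{}_val.txt'.format(fold_idx)))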
import pandas as pd
import numpy as np
import seaborn as sns
import warnings
def createRowColorDataFrame( discreteStatesDataFrame, nanColor =(0,0,0), predeterminedColorMapping={} ):
""" Create color dataframe for use with seaborn clustermap
Args:
discreteStatesDataFrame (pd.DataFrame) : Dataframe containing the data to convert to colors, like: pd.DataFrame( [['A','x'],['A','y']],index=['A','B'], columns=['First', 'Second'] )
nanColor(tuple) : Color for records having an NAN
predeterminedColorMapping(dict) : Supply class colors here (optional)
Returns:
discreteColorMatrix (pd.DataFrame) : Dataframe to pass to seaborn clustermap row_colors, or col_colors
luts (dict) : class->color mapping
"""
# Should look like:
# discreteStatesDataFrame = pd.DataFrame( [['A','x'],['A','y']],index=['A','B'], columns=['First', 'Second'] )
colorMatrix = []
luts = {}
for column in discreteStatesDataFrame:
states = [x for x in discreteStatesDataFrame[column].unique() if not pd.isnull(x)]
undeterminedColorStates = [x for x in discreteStatesDataFrame[column].unique() if not pd.isnull(x) and not x in predeterminedColorMapping]
cols = sns.color_palette('hls',len(undeterminedColorStates))
#lut = { i:sns.color_palette('bright').jet(x) for i,x in zip(states, np.linspace(0,1,len(states)) )}
lut = { state:cols[i] for i,state in enumerate(undeterminedColorStates) }
lut.update({key:value for key,value in predeterminedColorMapping.items() if key in states})
lut[np.nan] = nanColor
colorMatrix.append( [ nanColor if pd.isnull(x) else lut[x] for x in discreteStatesDataFrame[column] ] )
luts[column] = lut
    discreteColorMatrix = pd.DataFrame(colorMatrix, index=discreteStatesDataFrame.columns, columns=discreteStatesDataFrame.index)  # target API: pandas.DataFrame
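# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): assuming the
# function ends by returning (discreteColorMatrix, luts) as its docstring
# states, the colour frame can be handed to seaborn's clustermap. The toy data
# below and the transpose are assumptions about the expected orientation.
def _clustermap_usage_sketch():
    data = pd.DataFrame(np.random.rand(4, 2), columns=['cellA', 'cellB'])
    states = pd.DataFrame([['tumour', 'x'], ['normal', 'y']],
                          index=['cellA', 'cellB'], columns=['Type', 'Batch'])
    col_colors, luts = createRowColorDataFrame(states)
    return sns.clustermap(data, col_colors=col_colors.T)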
import argparse
import logging
import pandas as pd
import pathlib
from pyspark.sql import SparkSession
from typing import List
from src import constants
from src.utils.logging import get_logger
from src.processing import recovery_analysis
logger = get_logger(__name__)
logger.setLevel(logging.INFO)
def _get_paisagenslidar_path() -> pathlib.Path:
return
def _get_files(survey_name: str) -> List[pathlib.Path]:
if survey_name == 'eba':
filepath = constants.EBALIDAR_PATH / 'laz_EBA_processed'
return [survey for survey in filepath.iterdir()
if (survey / "grid_metrics").exists() and
any((survey / "grid_metrics").iterdir())
]
elif survey_name == 'paisagenslidar':
filepath = constants.PAISAGENSLIDAR_PATH / 'processed'
surveys = []
for subdir in filepath.iterdir():
for survey in subdir.iterdir():
surveys.append(survey)
return [survey for survey in surveys
if (survey / "grid_metrics").exists() and
any((survey / "grid_metrics").iterdir())
]
else:
raise ValueError(f'Survey {survey_name} not supported')
def compute_recovery(file: pathlib.Path):
try:
return recovery_analysis.compute_recovery_dataset(file.name)
except (KeyError, ValueError, RuntimeError) as e:
logger.warning(
"Encountered error in survey {}, skipping: {}".format(
file.name, e)
)
        return pd.DataFrame({})  # target API: pandas.DataFrame
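# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a plausible driver that
# maps compute_recovery over every survey of one source and stacks the
# per-survey frames; the 'eba' default and the concat behaviour are assumptions.
def _recovery_driver_sketch(survey_name='eba'):
    frames = [compute_recovery(survey) for survey in _get_files(survey_name)]
    non_empty = [f for f in frames if not f.empty]
    return pd.concat(non_empty, ignore_index=True) if non_empty else pd.DataFrame({})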
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import csv
from io import StringIO
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.parsers import TextParser
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = {"index_col": 0}
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
with TextParser(data_list, chunksize=2, **kwargs) as parser:
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
    tm.assert_frame_equal(result, expected)  # target API: pandas._testing.assert_frame_equal
from configparser import ConfigParser
from election_anomaly import db_routines
from election_anomaly import db_routines as dbr
from election_anomaly.db_routines import create_cdf_db as db_cdf
from election_anomaly import munge_routines as mr
import pandas as pd
import numpy as np
import csv
from sqlalchemy.orm import sessionmaker
import os
from pathlib import Path
import ntpath
import re
import datetime
from election_anomaly import juris_and_munger as sf
import random
from tkinter import filedialog
from configparser import MissingSectionHeaderError
recognized_encodings = {'iso2022jp', 'arabic', 'cp861', 'csptcp154', 'shiftjisx0213', '950', 'IBM775',
'IBM861', 'shift_jis', 'euc_jp', 'ibm1026', 'ascii', 'IBM437', 'EBCDIC-CP-BE',
'csshiftjis', 'cp1253', 'jisx0213', 'latin', 'cp874', '861', 'windows-1255', 'cp1361',
'macroman', 'ms950', 'iso-2022-jp-3', 'iso8859_14', 'cp949', 'utf_16', '932', 'cp737',
'iso2022_jp_2004', 'ks_c-5601', 'iso-2022-kr', 'ms936', 'cp819', 'iso-8859-3', 'windows-1258',
'csiso2022kr', 'iso-8859-2', 'iso2022_jp_ext', 'hz', 'iso-8859-13', 'IBM855', 'cp1140', '866',
'862', 'iso2022jp-2004', 'cp1250', 'windows-1254', 'cp1258', 'gb2312-1980', '936', 'L6',
'iso-8859-6', 'ms932', 'macgreek', 'cp154', 'big5-tw', 'maccentraleurope', 'iso-8859-7',
'ks_x-1001', 'csbig5', 'cp1257', 'latin1', 'mac_roman', 'euckr', 'latin3', 'eucjis2004',
'437', 'cp500', 'mac_latin2', 'CP-GR', 'IBM863', 'hz-gb-2312', 'iso2022jp-3', 'iso-8859-15',
'koi8_r', 'sjisx0213', 'windows-1252', '850', 'cp855', 'windows1256', 'eucjisx0213', 'hkscs',
'gb18030', 'iso-2022-jp-2004', 'L1', 'cyrillic-asian', 'iso2022jp-ext', 'cp1006', 'utf16',
'iso2022_kr', 'iso2022jp-2', 'shiftjis', 'IBM037', 'gb2312-80', 'IBM500', '865', 'UTF-16BE',
'IBM864', 'EBCDIC-CP-CH', 'iso-8859-4', 'cp856', 'iso2022_jp_1', 'eucjp', 'iso-2022-jp-1',
'iso8859_3', 'gb18030-2000', 'cp860', 'mskanji', 'iso2022jp-1', 'iso-8859-8',
'iso-2022-jp-ext', 'csiso58gb231280', 'shift_jis_2004', 'L2', 'ms1361', 'cp852', 'ms949',
'IBM865', 'cp437', 'iso8859_4', 'iso8859_2', 'cp1255', 'euc_jisx0213', 'cp1252', 'macturkish',
'iso8859_9', 'ptcp154', '949', 'cp864', 's_jisx0213', 'big5-hkscs', 'korean', 'iso2022_jp_2',
'cp932', 'euc-cn', 'latin5', 'utf_8', 'ibm1140', 'cp862', 'euc_kr', 'iso8859_8', 'iso-8859-9',
'utf8', 'cp1251', '863', 'cp850', 'cp857', 'greek', 'latin8', 'iso2022_jp_3', 'iso-8859-10',
'big5hkscs', 'ms-kanji', 'iso2022kr', '646', 'iso8859_7', 'koi8_u', 'mac_greek',
'windows-1251', 'cp775', 'IBM860', 'u-jis', 'iso-8859-5', 'us-ascii', 'maccyrillic',
'IBM866', 'L3', 'sjis2004', 'cp1256', 'sjis_2004', '852', 'windows-1250', 'latin4',
'cp037', 'shift_jisx0213', 'greek8', 'latin6', 'latin2', 'mac_turkish', 'IBM862', 'iso8859-1',
'cp1026', 'IBM852', 'pt154', 'iso-2022-jp-2', 'ujis', '855', 'iso-8859-14', 'iso-2022-jp',
'utf_16_be', 'chinese', 'maclatin2', 'U7', 'hzgb', 'iso8859_5', '857', 'IBM850', '8859',
'gb2312', 'cp866', 'CP-IS', 'latin_1', 'L4', 'euccn', 'cyrillic', 'IBM424', 'cp863',
'UTF-16LE', 'mac_cyrillic', 'iso8859_10', 'L8', 'IBM869', 'ksc5601', '860', 'iso2022_jp',
'hz-gb', 'UTF', 'utf8ascii', 'utf_7', 'cp936', 'euc_jis_2004', 'iso-ir-58', 'csiso2022jp',
'IBM039', 'eucgb2312-cn', 'cp950', 'iso8859_13', 'shiftjis2004', 'sjis', 'U8', 'cp1254',
's_jis', 'gbk', 'hebrew', 'U16', 'big5', 'cp865', 'cp424', 'uhc', 'windows-1257', '869',
'iso-8859-1', 'windows-1253', 'ksx1001', 'johab', 'IBM857', 'L5', 'iso8859_6', 'cp869',
'cp875', 'mac_iceland', 'iso8859_15', 'maciceland', 'utf_16_le', 'EBCDIC-CP-HE',
'ks_c-5601-1987'}
def get_project_root():
p_root = os.getcwd().split('election_anomaly')[0]
confirmed = False
subdir_list = ['election_anomaly','jurisdictions','mungers']
while not confirmed:
missing = [x for x in subdir_list if x not in os.listdir(p_root)]
print(f'\nSuggested project root directory is:\n\t{p_root}')
if missing:
print(f'The suggested directory does not contain required subdirectories {",".join(missing)}')
new_pr = input(f'Designate a different project root (y/n)?\n')
if new_pr == 'y':
p_root = input(f'Enter absolute path of project root.\n')
else:
input('Add required subdirectories and hit return to continue.\n')
elif input('Is this the correct project root (y/n)?\n') == 'y':
confirmed = True
return p_root
def pick_file_or_directory(description=None,mode=None):
if not mode:
print(f'No mode specified')
return None
elif mode not in ['file','directory']:
print(f'Mode {mode} not recognized')
return None
else:
if not description:
description = f'the {mode}'
print(f'Use the pop-up window to pick {description}.')
directory = pick_path(mode=mode)
return directory
def track_results_file(project_root,sess,results_file):
filename = ntpath.basename(results_file)
db_idx, datafile_record_d, datafile_enumeration_name_d, datafile_fk_name_d = pick_or_create_record(
sess,project_root,'_datafile',known_info_d={'file_name':filename})
# TODO typing url into debug window opens the web page; want it to just act like a string
return [datafile_record_d, datafile_enumeration_name_d]
def pick_path(initialdir='~/',mode='file'):
"""Creates pop-up window for user to choose a <mode>, starting from <initialdir>.
Returns chosen file path or directory path (depending on <mode>"""
while True:
fpath = input(
f'Enter path to {mode} (or hit return to use pop-up window to find it).\n').strip()
if not fpath:
print(f'Use pop-up window to pick your {mode}.')
if mode == 'file':
fpath = filedialog.askopenfilename(
initialdir=initialdir,title=f"Select {mode}",
filetypes=(("text files","*.txt"),("csv files","*.csv"),("ini files","*.ini"),("all files","*.*")))
elif mode == 'directory':
fpath = filedialog.askdirectory(initialdir=initialdir,title=f'Select {mode}')
else:
print(f'Mode {mode} not recognized')
return None
print(f'The {mode} you chose is:\n\t{fpath}')
break
elif (mode == 'file' and not os.path.isfile(fpath)) or (mode == 'directory' and not os.path.isdir(fpath)):
print(f'This is not a {mode}: {fpath}\nTry again.')
else:
break
return fpath
def pick_one(choices,return_col,item='row',required=False,max_rows=40):
"""Returns index and <return_col> value of item chosen by user
<choices> is a dataframe, unless <return_col> is None, in which case <choices>
may be a list or a set"""
if return_col is None:
df = pd.DataFrame(np.array(list(choices)).transpose(),columns=[item])
return_col = item
choices = df # regularizes 'choices.index[choice]' in return
else:
df = choices.copy()
df.index = range(choices.shape[0])
if df.empty:
return None, None
with pd.option_context('display.max_rows',max_rows,'display.max_columns',None):
print(df)
choice = -1 # guaranteed not to be in df.index
while choice not in df.index:
if not required:
req_str=' (or nothing, if your choice is not on the list)'
else:
req_str=''
choice_str = input(f'Enter the number of the desired {item}{req_str}:\n')
if choice_str == '' and not required:
return None,None
else:
try:
choice = int(choice_str)
if choice not in df.index:
print(f'Enter an option from the leftmost column. Please try again.')
except ValueError:
print(f'You must enter a number{req_str}, then hit return. Please try again.')
print(f'Chosen {item} is {df.loc[choice,return_col]}\n\n')
return choices.index[choice], df.loc[choice,return_col]
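# Illustrative usage sketch (not part of the original module): pick_one can be
# driven either with a DataFrame plus the column to return, or with a plain
# list/set and return_col=None. The candidate data below is invented.
def _pick_one_usage_sketch():
    offices = pd.DataFrame({'Name': ['Governor', 'Senator'], 'ElectionDistrict': ['NC', 'NC']})
    idx, office_name = pick_one(offices, 'Name', item='office')
    _, colour = pick_one(['red', 'green', 'blue'], None, item='colour')
    return idx, office_name, colour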
def pick_paramfile(msg='Locate the parameter file for your postgreSQL database.'):
print(msg)
fpath= pick_path()
return fpath
def show_sample(input_iter,items,condition,outfile='shown_items.txt',export_dir=None,export=False):
print(f'There are {len(input_iter)} {items} that {condition}:')
if len(input_iter) == 0:
return
if isinstance(input_iter,pd.DataFrame):
st = input_iter.to_csv(sep='\t').split('\n')
else:
st = list(input_iter)
st.sort()
if len(st) < 11:
show_list = st
else:
print('(sample)')
show_list = random.sample(st,10)
show_list.sort()
for r in show_list:
print(r)
if len(st) > 10:
show_all = input(f'Show all {len(st)} {items} that {condition} (y/n)?\n')
if show_all == 'y':
for r in st:
print(f'{r}')
if export:
if export_dir is None:
export_dir = input(f'Export all {len(st)} {items} that {condition}? If so, enter directory for export.'
f'Existing file will be overwritten.\n'
f'(Current directory is {os.getcwd()})\n')
if os.path.isdir(export_dir):
export = input(f'Export all {len(st)} {items} that {condition} to {outfile} (y/n)?\n')
if export == 'y':
with open(os.path.join(export_dir,outfile),'w') as f:
f.write('\n'.join(st))
print(f'{items} exported to {os.path.join(export_dir,outfile)}')
elif export_dir != '':
print(f'Directory {export_dir} does not exist.')
return
def pick_juris_from_filesystem(project_root,juriss_dir='jurisdictions',juris_name=None,check_files=False):
"""Returns a State object.
If <jurisdiction_name> is given, this just initializes based on info
in the folder with that name; """
missing_values = {}
path_to_jurisdictions = os.path.join(project_root,juriss_dir)
if check_files:
juris_path = os.path.join(path_to_jurisdictions,juris_name)
missing_values = sf.ensure_jurisdiction_files(juris_path,project_root)
# initialize the jurisdiction
if missing_values:
ss = None
else:
ss = sf.Jurisdiction(juris_name,path_to_jurisdictions)
return ss, missing_values
def find_dupes(df):
dupes_df = df[df.duplicated()].drop_duplicates(keep='first')
deduped = df.drop_duplicates(keep='first')
return dupes_df, deduped
def pick_munger(mungers_dir='mungers',project_root=None,session=None,munger_name=None):
error = sf.ensure_munger_files(munger_name,project_root=project_root)
munger_path = os.path.join(mungers_dir,munger_name)
if not error:
munger = sf.Munger(munger_path,project_root=project_root,check_files=False)
#munger_error is None unless internal inconsistency found
munger_error = munger.check_against_self()
return munger, munger_error
else:
return None, error
def pick_or_create_record(sess,project_root,element,known_info_d=None):
"""User picks record from database if exists.
Otherwise user picks from file system if exists.
Otherwise user enters all relevant info.
Store record in file system and/or db if new
Return index of record in database"""
if not known_info_d:
known_info_d = {}
storage_dir = os.path.join(project_root,'db_records_entered_by_hand')
# pick from database if possible
db_idx, db_style_record = pick_record_from_db(sess,element,known_info_d=known_info_d)
# if not from db
if db_idx is None:
# pick from file_system
fs_idx, file_style_record = pick_record_from_file_system(storage_dir,element,known_info_d=known_info_d)
# if not from file_system
if fs_idx is None:
# have user enter record
db_style_record, enum_plaintext_dict, fk_plaintext_dict = get_record_info_from_user(
sess,element,known_info_d=known_info_d)
# save to db
[db_idx, db_style_record, enum_plaintext_dict, fk_plaintext_dict,changed] = dbr.save_one_to_db(
sess,element,db_style_record)
# save to file system
save_record_to_filesystem(storage_dir,element,db_style_record,enum_plaintext_dict)
# if found in file system
else:
try:
db_style_record = mr.db_record_from_file_record(sess,element,file_style_record)
db_idx,db_style_record,enum_plaintext_dict,fk_plaintext_dict, changed = dbr.save_one_to_db(
sess,element,db_style_record)
except KeyError as e:
print(e)
input(
f'Perhaps the file {element}.txt in {storage_dir} does not have all fields '
f'required by the corresponding database table.\n'
f'Revise {element}.txt and hit return to continue.')
db_idx,db_style_record,enum_plaintext_dict,fk_plaintext_dict = pick_or_create_record(
sess,project_root,element,known_info_d=known_info_d)
# if picked from db
else:
enum_plaintext_dict = mr.enum_plaintext_dict_from_db_record(sess,element,db_style_record)
fk_plaintext_dict = mr.fk_plaintext_dict_from_db_record(
sess,element,db_style_record,excluded=enum_plaintext_dict.keys())
return db_idx, db_style_record, enum_plaintext_dict, fk_plaintext_dict
def pick_record_from_db(sess,element,known_info_d=None,required=False,db_idx=None):
"""Get id and info from database, if it exists.
If <db_idx> is passed, return that index and a dictionary with the rest of the record"""
if not known_info_d:
known_info_d = {}
element_df = pd.read_sql_table(element,sess.bind,index_col='Id')
if element_df.empty:
return None,None
elif db_idx:
return db_idx, element_df.loc[db_idx].to_dict()
# add columns for plaintext of any enumerations
# FIXME also add columns for foreign key plaintext
enums = dbr.read_enums_from_db_table(sess,element)
element_enhanced_df = element_df.copy()
for e in enums:
e_df = pd.read_sql_table(e,sess.bind,index_col='Id')
element_enhanced_df = mr.enum_col_from_id_othertext(element_enhanced_df,e,e_df,drop_old=False)
# filter by known_info_d
d = {k:v for k,v in known_info_d.items() if k in element_enhanced_df.columns}
filtered = element_enhanced_df.loc[(element_enhanced_df[list(d)] == pd.Series(d)).all(axis=1)]
# TODO if filtered is empty, offer all
if filtered.empty:
print('Nothing meets the filter criteria. Unfiltered options will be offered.')
filtered = element_enhanced_df
print(f'Pick the {element} record from the database:')
name_field = db_routines.get_name_field(element)
element_idx, values = pick_one(filtered,name_field,element)
if element_idx in element_df.index:
d = dict(element_df.loc[element_idx])
else:
d = None
if required and element_idx is None:
# offer to filter by available enumerations
enum_list = [x for x in dbr.get_enumerations(sess,element) if x not in known_info_d]
if len(enum_list) == 0:
print('No more filters available. You must choose from this list')
element_idx, d = pick_record_from_db(sess,element,known_info_d=known_info_d)
else:
while element_idx is None and len(enum_list) > 0:
e = enum_list[0]
e_filter = input(f'Filter by {e} (y/n)?\n')
if e_filter == 'y':
known_info_d[f'{e}_Id'],known_info_d[f'Other{e}'],known_info_d[e] = pick_enum(sess,e)
element_idx, d = pick_record_from_db(sess,element,known_info_d=known_info_d,required=True)
enum_list.remove(e)
return element_idx, d
def pick_enum(sess,e):
e_df = pd.read_sql_table(e,sess.bind,index_col='Id')
e_idx,e_plaintext = pick_one(e_df,'Txt',item=e,required=True)
if e_plaintext == 'other':
# get plaintext from user
while e_plaintext in ['','other']:
e_plaintext = get_alphanumeric_from_user(
f'Enter the value for {e}, which cannot be empty and cannot be \'other\'',allow_hyphen=True)
e_othertext = e_plaintext
else:
e_othertext = ''
return e_idx,e_othertext,e_plaintext
def pick_record_from_file_system(storage_dir,table,known_info_d=None):
""" Looks for record in file system.
Returns a file-style <record> (with enums as plaintext).
If no record found, <idx> is none;
otherwise value of <idx> is irrelevant."""
# initialize to keep syntax-checker happy
filtered_file = None
if not known_info_d:
known_info_d = {}
name_field = dbr.get_name_field(table)
# identify/create the directory for storing individual records in file system
if not os.path.isdir(storage_dir):
os.makedirs(storage_dir)
# read any info from <table>'s file within that directory
storage_file = os.path.join(storage_dir,f'{table}.txt')
if os.path.isfile(storage_file):
from_file = pd.read_csv(storage_file,sep='\t')
if not from_file.empty:
# filter via known_info_d
filtered_file = from_file.loc[(from_file[list(known_info_d)] == pd.Series(known_info_d)).all(axis=1)]
else:
filtered_file = from_file
print(f'Pick a record from {table} list in file system:')
idx, file_style_record = pick_one(filtered_file,name_field)
else:
idx, file_style_record = None, None
if idx is not None:
file_style_record = dict(filtered_file.loc[idx])
else:
file_style_record = None
return idx, file_style_record
def save_record_to_filesystem(storage_dir,table,user_record,enum_plain_text_values):
# identify/create the directory for storing individual records in file system
for e in enum_plain_text_values.keys():
user_record[e] = enum_plain_text_values[e] # add plain text
        user_record.pop(f'{e}_Id', None)  # remove Id (user_record is a dict, which has no .remove)
        user_record.pop(f'Other{e}', None)  # remove other text
if not os.path.isdir(storage_dir):
os.makedirs(storage_dir)
storage_file = os.path.join(storage_dir,f'{table}.txt')
if os.path.isfile(storage_file):
records = pd.read_csv(storage_file,sep='\t')
else:
# create empty, with all cols of from_db except Id
records = pd.DataFrame([],columns=user_record.keys())
    records = records.append(user_record, ignore_index=True)
records.to_csv(storage_file,sep='\t',index=False)
return
def get_datatype(df,c):
"""Kludge to get datatype"""
if df.dtypes[c] in [np.dtype('M8[ns]'),np.dtype('datetime64')]:
datatype_string = 'Date'
elif df.dtypes[c] in [np.dtype('int64')]:
datatype_string = 'Int'
elif df.dtypes[c] in [np.object]:
datatype_string = 'String'
else:
print(f'Datatype {df.dtypes[c]} not recognized. To fix this error, alter code in the `get_datatype` function.')
datatype_string = None
return datatype_string
def get_record_info_from_user(sess,element,known_info_d={},mode='database'):
"""Collect new record info from user, with chance to confirm.
For each enumeration, translate the user's plaintext input into id/othertext.
Return the corresponding record (id/othertext only) and an enumeration-value
dictionary. Depending on <mode> ('database', 'filesystem' or 'database_and_filesystem'),
returns enum plaintext, or enum id/othertext pairs, or both.
"""
# read existing info from db
all_from_db = pd.read_sql_table(element,sess.bind,index_col='Id')
# initialize <show_user_cols>
db_cols = list(all_from_db.columns) # note: does not include 'Id'
show_user_cols=db_cols.copy()
# initialize value dictionaries to be returned
enum_val = fk_val = new = {}
enum_list = dbr.get_enumerations(sess,element)
fk_df = dbr.get_foreign_key_df(sess,element)
# get enumeration tables from db
e_df = {}
for e in enum_list:
e_df[e] = pd.read_sql_table(e,sess.bind,index_col='Id')
# add cols to all_from_db for showing user and update show_user_cols
for e in enum_list:
all_from_db = mr.enum_col_from_id_othertext(all_from_db,e,e_df[e],drop_old=False)
show_user_cols.append(e)
show_user_cols.remove(f'{e}_Id')
show_user_cols.remove(f'Other{e}')
for i,r in fk_df.iterrows():
# exclude foreign ids pointing to enumerations
if i[:-3] not in enum_list:
all_from_db = dbr.add_foreign_key_name_col(
sess,all_from_db,r['foreign_column_name'],r['foreign_table_name'],drop_old=False)
show_user_cols.append(i[:-3])
show_user_cols.remove(i)
# collect and confirm info from user
unconfirmed = True
while unconfirmed:
# solicit info from user and store values for db insertion
new = {}
print(f'Enter info for new {element} record.')
for c in db_cols:
# define new[c] if value is known
if c in known_info_d.keys():
new[c] = known_info_d[c]
# if c is an enumeration Id
if c[-3:] == '_Id' and c[:-3] in enum_list:
c_plain = c[:-3]
# if plaintext of enumeration is known
if c_plain in new.keys():
                    new[c], new[f'Other{c_plain}'] = mr.enum_value_to_id_othertext(e_df[c_plain], new[c_plain])
# if id/othertext of enumeration is known
                # if id/othertext of enumeration is known
                elif c in new.keys() and f'Other{c_plain}' in new.keys():
                    new[c_plain] = mr.enum_value_from_id_othertext(e_df[c_plain], new[c], new[f'Other{c_plain}'])
# otherwise
else:
new[c], new[f'Other{c_plain}'], new[c_plain] = pick_enum(sess,c_plain)
# if c is an Other<enumeration>, new value was defined in loop through enum_list
elif c[:5] == 'Other' and c[5:] in enum_list:
pass
# if c is a foreign key (and not an enumeration)
elif c in fk_df.index:
# if foreign key id is known
c_plain = c[:-3]
if c in new.keys():
new[c_plain] = dbr.name_from_id(sess,fk_df.loc[c,'foreign_table_name'],new[c])
# if foreign key plaintext is known
elif c_plain in new.keys():
new[c] = dbr.name_to_id(sess,fk_df.loc[c,'foreign_table_name'],new[c_plain])
# otherwise
else:
print(f'Specify the {fk_df.loc[c,"foreign_table_name"]} for this {element}')
idx, db_record = pick_record_from_db(sess,fk_df.loc[c,'foreign_table_name'],required=True)
new[c_plain] = db_record[dbr.get_name_field(fk_df.loc[c,'foreign_table_name'])]
# TODO pull from DB info about whether the foreign key is required
new[c] = dbr.name_to_id(sess,fk_df.loc[c,'foreign_table_name'],new[c_plain])
else:
new[c] = enter_and_check_datatype(f'Enter the {c}',get_datatype(all_from_db,c))
# present to user for confirmation
entry = '\n\t'.join([f'{k}:\t{new[k]}' for k in show_user_cols])
confirm = input(f'Confirm entry:\n\t{entry}\nIs this correct (y/n)?\n')
if confirm == 'y':
unconfirmed = False
# get db_record, enum_val, fk_val
db_record = {k:new[k] for k in db_cols}
enum_val = {e:new[e] for e in enum_list}
fk_val = {k[:-3]:new[k[:-3]] for k in fk_df.index}
show_user = {k:new[k] for k in show_user_cols}
if mode == 'database':
return db_record, enum_val, fk_val
elif mode == 'filesystem':
return show_user, enum_val, fk_val
elif mode == 'database_and_filesystem':
return {**db_record,**show_user},enum_val, fk_val
else:
print(f'Mode {mode} not recognized.')
return None, None, None
def check_datatype(answer,datatype):
"""Datatype is typically 'Integer', 'String', 'Date' or 'Encoding'"""
good = False
if datatype == 'Date':
default = datetime.datetime.today().date()
try:
answer = datetime.datetime.strptime(answer,'%Y-%m-%d').date()
good = True
except ValueError:
use_default = input(f'Answer not recognized as {datatype}. Use default value of {default} (y/n)?\n')
if use_default == 'y':
answer = default
good = True
else:
print('You need to enter a date in the form \'2018-11-06\'.')
# express date as string, e.g., 2020-05-23
answer = f'{answer}'
elif datatype == 'Integer':
try:
int(answer)
good = True
except ValueError:
print('You need to enter an integer.')
else:
# Nothing to check for String datatype
good = True
return good, answer
def enter_and_check_datatype(question,datatype):
answer = input(f'{question}')
good = False
while not good:
good,answer = check_datatype(answer,datatype)
if not good:
answer = input('Try again:\n')
return answer
def read_datafile(munger,f_path):
try:
if munger.file_type in ['txt','csv']:
kwargs = {'encoding':munger.encoding,'quoting':csv.QUOTE_MINIMAL,'header':list(range(munger.header_row_count)),
'thousands':munger.thousands_separator}
if munger.file_type == 'txt':
kwargs['sep'] = '\t'
df = pd.read_csv(f_path,**kwargs)
elif munger.file_type in ['xls','xlsx']:
            df = pd.read_excel(f_path, dtype=str, thousands=munger.thousands_separator)  # target API: pandas.read_excel
from __future__ import absolute_import, division, print_function
import datetime
import pandas as pd
from config import *
def _analysis_create_members():
"""
Creates a table with members data
:return:
"""
logger.info("Creating members table")
members_metadata = pd.read_csv(members_metadata_path)
members_metadata = members_metadata.query('participates == 1').copy()
    members_metadata['start_date_ts'] = pd.to_datetime(members_metadata['start_date'])  # target API: pandas.to_datetime
#IMPORTS.......................................................................
import pandas as pd
from numpy import log2 as log
from sklearn.metrics import confusion_matrix
import seaborn as sns
import os # accessing directory structure
import numpy as np
import matplotlib.pyplot as plt
import random
eps = np.finfo(float).eps #Small value such that log won't fail
Directory = ""
os.chdir(Directory)
#%% DATASETS...................................................................
def get_wine_dataset():
df = pd.read_csv('wine.data', delimiter=',')
df.columns=['Class', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids','Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity','Hue' , 'OD280/OD315 of diluted wines','Proline' ]
#df.to_excel("outputttt.xlsx")
return df
#%% LOAD DATASET...............................................................
df = get_wine_dataset()
class_attribute = 'Class'
class_column = df[class_attribute]
df.drop(labels=[class_attribute], axis=1,inplace = True) #Removes the column with class labels
df.insert(len(df.columns), class_attribute, class_column) # inserts class label column at the end of the dataframe
#df.to_excel("test_cont.xlsx")
print(df)
#%%FUNCTIONS...................................................................
def accuracy(y_true, y_predicted):
'''Reports the accuracy of two lists of true and predicted values'''
correct = 0
count = 0
for true, pred in zip(y_true, y_predicted):
if int(true) == int(pred):
correct += 1
else:
print(count)
count += 1
accuracy = correct/len(y_predicted)*100
print('Accuracy of classifer {:0.2f} %' .format(accuracy))
return accuracy
def print_conf_mat(y_true, y_predicted):
'''Prints the confusion matrix from the true and predicted class labels'''
mat1 = confusion_matrix(y_true, y_predicted) #labels=["positive", "negative"])
#true_mat = confusion_matrix(y_true,y_true,labels=["positive", "negative"])
#plt.figure(0)
ax= plt.subplot()
sns.heatmap(mat1, square=True, annot=True, cbar=False,fmt="d", ax = ax)
ax.set_title('Predicted Matrix')
ax.set_xlabel('predicted value')
ax.set_ylabel('true value')
plt.show()
return
def entropy(dataframe, target_attribute):
'''Calculates the Entropy of a dataset for the target attribute'''
entropy = 0 #Initialize Entropy
values = dataframe[target_attribute].unique() #Play has two options 'Yes', 'No'
play_data = list(dataframe[target_attribute].values)
for value in values:
proportion = play_data.count(value)/len(play_data) #Proportion of given value in the dataset
entropy += -proportion*log(proportion) # Entropy measures the uncertainty in a specific distribution
return entropy
def information_gain(Entropy_parent, Entropy_child):
'''Calculates the information gain from the parent and child entropy'''
return (Entropy_parent- Entropy_child)
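# Illustrative sanity check (not part of the original script): a 50/50 binary
# column should have exactly 1 bit of entropy and a pure column 0 bits; the toy
# frames below are invented for the example.
def _entropy_sanity_check():
    mixed = pd.DataFrame({'Play': ['Yes', 'Yes', 'No', 'No']})
    pure = pd.DataFrame({'Play': ['Yes', 'Yes', 'Yes', 'Yes']})
    assert abs(entropy(mixed, 'Play') - 1.0) < 1e-9
    assert abs(entropy(pure, 'Play')) < 1e-9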
def binary_split_dataframes(threshold, dataframe, node_seed):
'''Produces a binary split of a dataframe given a thershold and returns the two datassets
'''
l = list(dataframe.columns) # A list of the names of the dataframe columns
left_array = np.array(l) # makes an array with the header of dataframe
right_array = np.array(l)# makes an array with the header of dataframe
for index, row in dataframe.iterrows():
if row[node_seed] < threshold:
row_array = np.array(row)
right_array = np.vstack((right_array, row_array))
else:
#if row[node_seed] > threshold:
row_array = np.array(row)
left_array = np.vstack((left_array, row_array))
#Edge case, if the value is the min or max of the dataframe[node_seed] then
#one of the split dataframe will have all the data and the other none. This checks the length of the split dataframes and returns an empty dataframe
if len(left_array.shape) == 1 or len(right_array.shape) == 1: #This truth statement says if the shape of the array only has one entry, then it has the column titles and no entries and thus is empty.
        left_df = pd.DataFrame()  # target API: pandas.DataFrame