prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
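Each row below pairs a truncated Python snippet (`prompt`) with the expression that completes it (`completion`) and the fully qualified pandas API it exercises (`api`). As a minimal sketch, assuming the rows were exported as JSON Lines to a hypothetical file `rows.jsonl`, they could be loaded and inspected like this:

```python
import pandas as pd

# "rows.jsonl" is a hypothetical export path, not part of this dataset
rows = pd.read_json("rows.jsonl", lines=True)      # columns: prompt, completion, api
print(rows[["completion", "api"]].head())
print(rows["api"].value_counts().head(10))         # most frequently completed pandas calls
```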
import pandas as pd
import flywheel
import warnings
import argparse
import ast
import os
from flywheel_bids_tools.query_bids import process_acquisition
#from flywheel_bids_tools.bids_generator import BidsGenerator
from flywheel_bids_tools.utils import read_flywheel_csv
from tqdm import tqdm
FAILS = []
def build_intention_path(row):
path = "ses-{0}/{1}/{2}".format(
row['session.label'], row['info_BIDS_Folder'], row['info_BIDS_Filename'])
return path
def update_intentions(df, client):
global FAILS
df = df.dropna(subset=["info_BIDS_IntendedFor"]).reset_index()
counter = []
for index, row in tqdm(df.iterrows(), total=df.shape[0]):
try:
acq = client.get(row['acquisition.id'])
session = client.get(acq['parents']['session'])
acqs_df = []
for acquisition in session.acquisitions():
temp = process_acquisition(acquisition.id, client, target_cols=['info_SeriesDescription','info_ShimSetting', 'info_BIDS_Folder', 'info_BIDS_Filename', 'type'])
temp['session.label'] = row['session.label']
temp['subject.label'] = row['subject.label']
acqs_df.append(temp)
acqs_df = pd.concat(acqs_df, ignore_index=True, sort=False)
acqs_df = acqs_df.loc[acqs_df.type.str.contains("nifti"),]
current_shim = tuple(acqs_df.loc[(acqs_df['info_SeriesDescription'] == row['info_SeriesDescription']) & (acqs_df['acquisition.id'] == row['acquisition.id'])].info_ShimSetting.values[0])
assert len(current_shim) > 0, "No shim settings for this file"
acqs_df = acqs_df.loc[~(acqs_df['acquisition.id'] == row['acquisition.id'])]
#acqs_df = acqs_df.dropna(subset=['info_ShimSetting'])
#acqs_df['info_ShimSetting'] = acqs_df['info_ShimSetting'].map(tuple)
#final_files = acqs_df.loc[(acqs_df['info_ShimSetting'] == current_shim)]
#final_files = final_files.dropna()
intent = [x['Folder'] for x in ast.literal_eval(row['info_BIDS_IntendedFor'])]
final_files = acqs_df.loc[acqs_df['info_BIDS_Folder'].isin(intent), ]
assert len(final_files) > 0, "No matching files for this shim setting"
result = final_files.apply(build_intention_path, axis=1)
#print("{}: This file has {} matching files".format(row['info_BIDS_Filename'], len(result)))
acq.update_file_info(row['name'], {'IntendedFor': list(result.values)})
counter.append(pd.DataFrame({'files': result, 'origin': row['info_BIDS_Filename']}))
except Exception as e:
print("Unable to update intentions for this file:")
print(row['name'], row['session.label'], row['info_BIDS_Filename'])
print(e)
FAILS.append(row)
cwd = os.getcwd()
counter = pd.concat(counter, ignore_index=True, sort=False)
counter.to_csv("{}/successful_intention_updates.csv".format(cwd), index=False)
if len(FAILS) > 0:
fails_dict = [x.to_dict() for x in FAILS]
fails_df =
|
pd.DataFrame(fails_dict)
|
pandas.DataFrame
|
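The completion above builds the failure report by passing a list of per-row dictionaries to `pd.DataFrame`. A self-contained sketch of that pattern (the records below are invented, mirroring the `[x.to_dict() for x in FAILS]` step):

```python
import pandas as pd

# hypothetical failure records, one dict per failed file
fails_dict = [
    {"name": "scan1.nii.gz", "session.label": "ses-01", "info_BIDS_Filename": "sub-01_bold.nii.gz"},
    {"name": "scan2.nii.gz", "session.label": "ses-02", "info_BIDS_Filename": "sub-02_bold.nii.gz"},
]
fails_df = pd.DataFrame(fails_dict)   # one row per dict, columns taken from the dict keys
print(fails_df)
```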
import requests
import pandas as pd
import json
email = ''
key = ''
url_todos = 'https://www.mountainproject.com/data/get-ticks?email='+email+'&key='+key
url_route_info = 'https://www.mountainproject.com/data/get-routes?key='+key+'&routeIds='
def get_dataframe(passed_url, dataframe_element):
get_active_url = requests.request("GET", passed_url)
json_details = json.loads(get_active_url.text)[dataframe_element]
print(json_details)
df =
|
pd.DataFrame(json_details)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import itertools
from .statistics import r
def __get_X_Y_L__(X, Y=None):
if type(X) is not pd.DataFrame:
X = pd.DataFrame(X)
if Y is None:
Y = X.iloc[:,0]
X = X.iloc[:,1:]
else:
if type(Y) is not pd.DataFrame:
Y =
|
pd.DataFrame(Y)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import tests.mocks.operations as mockops
from trumania.core import operations
from trumania.core.util_functions import build_ids
def test_apply_should_delegate_to_single_col_dataframe_function_correctly():
# some function that expects a dataframe as input => must return a
# dataframe with a "result" column
def f(df):
return pd.DataFrame({"result": df["A"] + df["D"] - df["C"]})
tested = operations.Apply(source_fields=["A", "C", "D"],
named_as="r",
f=f, f_args="dataframe")
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = tested.build_output(story_data)
assert result["r"].equals(story_data["A"] + story_data["D"] - story_data[
"C"])
def test_apply_should_delegate_to_multi_col_dataframe_function_correctly():
# now f returns several columns
def f(df):
return pd.DataFrame({
"r1": df["A"] + df["D"] - df["C"],
"r2": df["A"] + df["C"],
"r3": df["A"] * df["C"],
})
tested = operations.Apply(source_fields=["A", "C", "D"],
named_as=["op1", "op2", "op3"],
f=f, f_args="dataframe")
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = tested.transform(story_data)
assert result.columns.tolist() == ["A", "B", "C", "D", "E", "op1", "op2",
"op3"]
assert result["op1"].equals(
story_data["A"] + story_data["D"] - story_data["C"])
assert result["op2"].equals(
story_data["A"] + story_data["C"])
assert result["op3"].equals(
story_data["A"] * story_data["C"])
def test_apply_should_delegate_to_columns_function_correctly():
"""
same as the above, but this time f input and output arguments are
pandas Series
"""
def f(ca, cc, cd):
return ca + cd - cc
tested = operations.Apply(source_fields=["A", "C", "D"],
named_as="r",
f=f, f_args="series")
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = tested.build_output(story_data)
assert result["r"].equals(
story_data["A"] + story_data["D"] - story_data["C"])
def test_one_execution_should_merge_empty_data_correctly():
# empty previous
prev_df = pd.DataFrame(columns=[])
prev_log = {}
nop = operations.Operation()
output, logs = operations.Chain._execute_operation((prev_df, prev_log), nop)
assert logs == {}
assert output.equals(prev_df)
def test_one_execution_should_merge_one_op_with_nothing_into_one_result():
# empty previous
prev =
|
pd.DataFrame(columns=[])
|
pandas.DataFrame
|
#Use scikit-learn to grid search the batch size and epochs
import csv
import os
import numpy as np
import pandas as pd
from standard_plots import *
from sklearn.grid_search import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Input
from keras.models import model_from_json, load_model
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential, Model
from keras.layers import Dense, LSTM, Dropout, Embedding, Input, Activation, Bidirectional, TimeDistributed, RepeatVector, Flatten
from keras.optimizers import Adam
from keras.utils import plot_model
import matplotlib.pyplot as plt
import tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
resultpath = "result"
#Parameters
#time steps
look_back = 20
#Optimizer Adam
adam = Adam(lr=learning_rate)
'''
filename = 'waypoint_trace_new.csv'
Loc_x = pd.read_csv(filepath_or_buffer = filename, sep = ',')["Loc_x"].values.astype('int')
Loc_y = pd.read_csv(filepath_or_buffer = filename, sep = ',')["Loc_y"].values.astype('int')
Mag_x = pd.read_csv(filepath_or_buffer = filename, sep = ',')["Mag_x"].values.astype('float')
Mag_y = pd.read_csv(filepath_or_buffer = filename, sep = ',')["Mag_y"].values.astype('float')
Mag_z = pd.read_csv(filepath_or_buffer = filename, sep = ',')["Mag_z"].values.astype('float')
'''
# convert an array of values into a dataset matrix for both input and output
def create_dataset_input(dataset, look_back=look_back):
dataX = []
for i in range(len(dataset)-look_back):
dataX.append(dataset[i:(i+look_back)])
return np.array(dataX)
#Process training data
def load_data(file_name, batch_size, split=0.75, look_back = look_back):
raw_Loc_x = pd.read_csv(file_name, sep=',', usecols=[0])
raw_Loc_x = np.array(raw_Loc_x).astype(float)
scaler_loc_x = MinMaxScaler()
Loc_x = scaler_loc_x.fit_transform(raw_Loc_x)
raw_Loc_y =
|
pd.read_csv(file_name, sep=',', usecols=[1])
|
pandas.read_csv
|
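The completed `pd.read_csv` call selects a single column by position with `usecols`. A minimal illustration of that behaviour (the CSV content is invented):

```python
import io
import pandas as pd

csv_text = "Loc_x,Loc_y,Mag_x\n1,10,0.1\n2,11,0.2\n"
# usecols=[1] keeps only the second column (here Loc_y), as in the completion above
raw_loc_y = pd.read_csv(io.StringIO(csv_text), sep=",", usecols=[1])
print(raw_loc_y)
```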
import numpy as np
import pandas as pd
import os
import glob
import astropy_stark.cream_lcplot as cream_plot
import astropy_stark.cream_plotlibrary as cpl
import matplotlib.pylab as plt
import multiprocessing as mp
import time
import itertools
import corner
from scipy.stats import pearsonr
import seaborn as sns; sns.set(style="ticks", color_codes=True)
class pycecream:
'''
One stop shop for fitting time lags and response functions to AGN
accretion discs. By fitting continuum light curves, pycecream can infer the
inclination and temperature profile of the AGN disc via the
wavelength-dependent response functions described in
Starkey et al. 2016 ( https://ui.adsabs.harvard.edu/#abs/arXiv:1511.06162 ).
For a full list of cream's features, please see the (sadly out of date) manual that
describes these features as applied to the previous fortran version of the code
CREAM. A more up-to-date manual will follow shortly.
Global class instance arguments...
redshift: The target redshift (default 0).
high_frequency: Large numbers will explore higher frequency variations at the expense
of computation time.
'''
def __init__(self):
'''
The default parameters are given below. First up are the global non-fitted input parameters.
Second are the global fitted parameters whose starting values can be modified below.
Note that...
1) Entering zero step sizes indicates the parameter will not be stepped.
2) Light curve-specific parameters (the error bar scaling, starting lag centroids and widths
for line light curves etc) are specified in the add_lc function.
'''
#config, path, compiler parameters
self.module_path = os.path.dirname(os.path.realpath(__file__))
self.fortran_caller = 'gfortran'
self.fortran_compile_command = self.fortran_caller+' cream_f90.f90 -o creamrun.exe'
print('pycecream path... ' + self.module_path)
#convention parameters
self.project_folder = 'pycecream'
self.append_date_to_output_directory = False
self.save_ALL_parameters = True
#global non-fitted parameters
self.redshift = 0.0
self.high_frequency = 0.5
self.bh_mass = 1.e7
self.bh_efficieny = 0.1
self.N_iterations = 1000
self.lag_lims = [-10.0,50.0]
#fitted parameters
self.p_inclination = 0.0
self.p_inclination_step = 0.0
self.p_inclination_priorcentroid = None
self.p_inclination_priorwidth = None
self.p_accretion_rate = 0.1
self.p_accretion_rate_step = 0.0
self.p_accretion_rate_priorcentroid = None
self.p_accretion_rate_priorwidth = None
self.p_viscous_slope = 0.75
self.p_viscous_slope_step = 0.0
self.p_extra_variance_step = 0.1
#non-configurable parameters
self.count_lightcurves = 0
self.count_continuum_lightcurves = 0
self.count_line_lightcurves = 0
self.p_linelag_centroids_start = 0.0
self.p_linelag_centroids_step = 5.0
self.p_linelag_widths_start = 2.0
self.p_linelag_widths_step = 0.0
self.dir_pwd = os.getcwd()
self.lightcurve_input_params = pd.DataFrame(columns=[
'name', 'type', 'wavelength', 'noise model',
'share previous lag','temporary file name',
'mean', 'standard deviation', 'tophat centroid',
'tophat centroid step',
'tophat centroid prior cent',
'tophat centroid prior width',
'tophat width',
'tophat width step',
'tophat width prior cent',
'tophat width prior width',
'background offset start','vertical scaling start'
])
self.global_input_params = pd.DataFrame(columns = [
'redshift','BH mass','BH efficiency','upper fourier frequency',''
])
def setup_directory_structure(self):
'''
Set up the output directory structure for a cream simulation.
Should be called once when you add the first light curve
:return:
'''
#make a directory into which to store the cream results
dir_pycecream = self.project_folder
if self.append_date_to_output_directory is True:
dir_pycecream = self.project_folder+'_'+str(pd.datetime.today().strftime("%d_%m_%Y"))+'_'
child_dirs = next(os.walk('.'))[1]
number_of_pyceream_dirs = len( [c for c in child_dirs if dir_pycecream in c] )
dir_pycecream = dir_pycecream+str(number_of_pyceream_dirs)
self.dir_pycecream = dir_pycecream
self.dir_sim = 'simulation_files'
try:
os.mkdir(self.dir_pycecream)
except:
raise Exception('directory...'+self.dir_pycecream+' already exists. '
'Please choose another using the '
'self.dir_pycecream variable')
os.mkdir(self.dir_pycecream+'/'+self.dir_sim)
#copy fortran files to pycecream directory
os.system('cp '+self.module_path+'/cream_f90.f90 ./'+self.dir_pycecream)
print('copying file...')
print(self.module_path)
os.system('cp ' + self.module_path + '/creaminpar.par ./' + self.dir_pycecream)
def add_lc(self,input,
kind = 'line',
wavelength = -1.0,
expand_errors = ['var','multiplicative'],
extra_variance_prior = [-1.0,-1.0],
multiplicative_errorbar_prior = [-1.0,-1.0],
name = None,
share_previous_lag = False,
background_offset_start=[-1.,-1.],
vertical_scaling_start=[-1.,-1.],
background_offset_prior = None,
vertical_scaling_prior = None,
tophat_centroid = None,
tophat_centroid_step = None,
tophat_centroid_prior = [0.0,-1.0],
tophat_width = None,
tophat_width_step = None,
tophat_width_prior=[0.0, -0.1],
background_polynomials = None):
'''
This is the go-to command for adding a new light curve to the
simulation.
:param input: either a Nx3 numpy array of time,flux,errors or a link to a file in the same format
:param kind: 'line' or 'continuum'. If continuum, must specify
wavelength
:param wavelength: The centroid wavelength of the continuum light curve
:param expand_errors:
:param share_previous_lag:
:param name: optional to set name for a light curve to annotate on plots and in data frames.
:param background_offset_start:[value,stepsize] start value and step size for the background offset parameter.
Leave as default [-1.,-1.] to ignore.
:param vertical_scaling_start: as above but for vertical scaling parameter.
:param background_offset_prior: [mean,sd] of gaussian prior. Leave as None to ignore priors
:param vertical_scaling_prior: as above but for the vertical scaling parameter
:param background_polynomials: list of starting coefficients, one per polynomial order, for a variable background
(e.g. [0.1,0.1] to add a quadratic polynomial)
:return:
'''
#assertion errors
assert(type(share_previous_lag) == bool)
#set up the directory structure if first call
if self.count_lightcurves == 0:
self.setup_directory_structure()
#count the number of line or continuum light curves already specified
if kind == 'line':
count = self.count_line_lightcurves
self.count_line_lightcurves = self.count_line_lightcurves + 1
elif kind == 'continuum':
count = self.count_continuum_lightcurves
self.count_continuum_lightcurves = self.count_continuum_lightcurves + 1
if wavelength == -1.0:
raise Exception('Must specify wavelength for a continuum light curve')
else:
raise Exception('kind argument must be "line" or "continuum"')
#configure the naming convention
if name is None:
name_ann = kind + ' lightcurve ' + np.str(count)
else:
name_ann = name
#load the data and save in required directory
if type(input) is str:
dat = np.loadtxt(input)
elif type(input) is np.ndarray:
dat = np.array(input)
else:
raise Exception('input to add_lc must be file name or numpy.ndarray')
fname = kind+'_'+np.str(count)+'.dat'
#check the data for problems and save
check_for_bad_values(dat,name_ann)
np.savetxt(self.dir_pycecream+'/'+self.dir_sim+'/'+fname,dat)
'''
configure the line lag settings (if sharing the same response function as the previous line,
use the same step size; else increment by a small positive number, e.g. 0.1)
'''
if tophat_centroid is None:
tophat_centroid = self.p_linelag_centroids_start
if tophat_centroid_step is None:
tophat_centroid_step = self.p_linelag_centroids_step
if tophat_width is None:
tophat_width = self.p_linelag_widths_start
if tophat_width_step is None:
tophat_width_step = self.p_linelag_widths_step
if share_previous_lag is False:
tophat_centroid_step = tophat_centroid_step + 0.1*self.count_lightcurves
else:
tophat_centroid_step = self.lightcurve_input_params['tophat centroid step'].values[-1]
#update the lightcurve_input_params table of records
df = pd.DataFrame(data = [name_ann,kind,wavelength,expand_errors,
extra_variance_prior,
multiplicative_errorbar_prior,
share_previous_lag,fname,
np.mean(dat[:,1]), np.std(dat[:,1]),
tophat_centroid,
tophat_centroid_step,
tophat_centroid_prior[0],
tophat_centroid_prior[1],
tophat_width,
tophat_width_step,
tophat_width_prior[0],
tophat_width_prior[1],
background_offset_start,
vertical_scaling_start,
background_offset_prior,
vertical_scaling_prior,
background_polynomials
],
index=['name', 'type', 'wavelength', 'noise model',
'extra variance prior','multiplicative errorbar prior',
'share previous lag','temporary file name',
'mean', 'standard deviation', 'tophat centroid',
'tophat centroid step',
'tophat centroid prior cent',
'tophat centroid prior width',
'tophat width',
'tophat width step',
'tophat width prior cent',
'tophat width prior width',
'background offset start','vertical scaling start',
'background offset prior','vertical scaling prior',
'background_polynomials']).T
self.lightcurve_input_params = pd.DataFrame(pd.concat([self.lightcurve_input_params,df]))
self.lightcurve_input_params['wavelength']= \
pd.to_numeric(self.lightcurve_input_params['wavelength'],downcast = 'float')
self.lightcurve_input_params['mean'] = \
pd.to_numeric(self.lightcurve_input_params['mean'],downcast = 'float')
self.lightcurve_input_params['standard deviation'] = \
pd.to_numeric(self.lightcurve_input_params['standard deviation'],downcast = 'float')
self.lightcurve_input_params['tophat centroid']= \
|
pd.to_numeric(self.lightcurve_input_params['tophat centroid'],downcast = 'float')
|
pandas.to_numeric
|
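The completion coerces a column that was assembled as `object` dtype back to a compact numeric dtype with `pd.to_numeric(..., downcast='float')`. A short sketch with made-up values:

```python
import pandas as pd

s = pd.Series(["0.0", 5.0, 2], dtype=object)     # mixed object column, e.g. after pd.concat
numeric = pd.to_numeric(s, downcast="float")     # downcast to float32 where possible
print(numeric.dtype, numeric.tolist())
```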
# -*- coding: utf-8 -*-
"""
Pipeline-GUI for Analysis with MNE-Python
@author: <NAME>
@email: <EMAIL>
@github: https://github.com/marsipu/mne_pipeline_hd
License: BSD (3-clause)
Written on top of MNE-Python
Copyright © 2011-2020, authors of MNE-Python (https://doi.org/10.3389/fnins.2013.00267)
inspired by <NAME>. (2018) (https://doi.org/10.3389/fnins.2018.00006)
"""
from ast import literal_eval
from datetime import datetime
import pandas as pd
from PyQt5.QtCore import QAbstractItemModel, QAbstractListModel, QAbstractTableModel, QModelIndex, Qt
from PyQt5.QtGui import QBrush, QFont
from PyQt5.QtWidgets import QStyle
from mne_pipeline_hd.gui.gui_utils import get_std_icon
class BaseListModel(QAbstractListModel):
""" A basic List-Model
Parameters
----------
data : list()
input existing list here, otherwise defaults to empty list
show_index : bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
"""
def __init__(self, data=None, show_index=False, drag_drop=False, **kwargs):
super().__init__(**kwargs)
self.show_index = show_index
self.drag_drop = drag_drop
if data is None:
self._data = list()
else:
self._data = data
def getData(self, index):
return self._data[index.row()]
def data(self, index, role=None):
if role == Qt.DisplayRole:
if self.show_index:
return f'{index.row()}: {self.getData(index)}'
else:
return str(self.getData(index))
elif role == Qt.EditRole:
return str(self.getData(index))
def rowCount(self, index=QModelIndex()):
return len(self._data)
def insertRows(self, row, count, index=QModelIndex()):
self.beginInsertRows(index, row, row + count - 1)
n = 0
for pos in range(row, row + count):
item_name = f'__new{n}__'
while item_name in self._data:
n += 1
item_name = f'__new{n}__'
self._data.insert(pos, item_name)
self.endInsertRows()
return True
def removeRows(self, row, count, index=QModelIndex()):
self.beginRemoveRows(index, row, row + count - 1)
for item in [self._data[i] for i in range(row, row + count)]:
self._data.remove(item)
self.endRemoveRows()
return True
def flags(self, index):
default_flags = QAbstractListModel.flags(self, index)
if self.drag_drop:
if index.isValid():
return default_flags | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled
else:
return default_flags | Qt.ItemIsDropEnabled
else:
return default_flags
def supportedDragActions(self):
if self.drag_drop:
return Qt.CopyAction | Qt.MoveAction
class EditListModel(BaseListModel):
"""An editable List-Model
Parameters
----------
data : list()
input existing list here, otherwise defaults to empty list
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
"""
def __init__(self, data, show_index=False, drag_drop=False, **kwargs):
super().__init__(data, show_index, drag_drop, **kwargs)
def flags(self, index=QModelIndex()):
default_flags = BaseListModel.flags(self, index)
if index.isValid():
return default_flags | Qt.ItemIsEditable
else:
return default_flags
def setData(self, index, value, role=None):
if role == Qt.EditRole:
try:
self._data[index.row()] = literal_eval(value)
except (ValueError, SyntaxError):
self._data[index.row()] = value
self.dataChanged.emit(index, index)
return True
return False
class CheckListModel(BaseListModel):
"""
A Model for a Check-List
Parameters
----------
data : list()
list with content to be displayed, defaults to empty list
checked : list()
list which stores the checked items from data
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
"""
def __init__(self, data, checked, one_check=False, show_index=False, drag_drop=False, **kwargs):
super().__init__(data, show_index, drag_drop, **kwargs)
self.one_check = one_check
if data is None:
self._data = list()
else:
self._data = data
if checked is None:
self._checked = list()
else:
self._checked = checked
def getChecked(self, index=QModelIndex()):
return self._checked[index.row()]
def data(self, index, role=None):
if role == Qt.DisplayRole:
if self.show_index:
return f'{index.row()}: {self.getData(index)}'
else:
return str(self.getData(index))
if role == Qt.CheckStateRole:
if self.getData(index) in self._checked:
return Qt.Checked
else:
return Qt.Unchecked
def setData(self, index, value, role=None):
if role == Qt.CheckStateRole:
if value == Qt.Checked:
if self.one_check:
self._checked.clear()
self._checked.append(self.getData(index))
else:
if self.getData(index) in self._checked:
self._checked.remove(self.getData(index))
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index=QModelIndex()):
return QAbstractItemModel.flags(self, index) | Qt.ItemIsUserCheckable
class CheckDictModel(BaseListModel):
"""
A Model for a list, which marks items which are present in a dictionary
Parameters
----------
data : list()
list with content to be displayed, defaults to empty list
check_dict : dict()
dictionary which may contain items from data as keys
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
yes_bt: str
Supply the name for a qt-standard-icon to mark the items existing in check_dict
no_bt: str
Supply the name for a qt-standard-icon to mark the items not existing in check_dict
Notes
-----
Names for QT standard-icons:
https://doc.qt.io/qt-5/qstyle.html#StandardPixmap-enum
"""
def __init__(self, data, check_dict, show_index=False, drag_drop=False,
yes_bt=None, no_bt=None, **kwargs):
super().__init__(data, show_index, drag_drop, **kwargs)
self._check_dict = check_dict
self.yes_bt = yes_bt or 'SP_DialogApplyButton'
self.no_bt = no_bt or 'SP_DialogCancelButton'
def data(self, index, role=None):
if role == Qt.DisplayRole:
if self.show_index:
return f'{index.row()}: {self.getData(index)}'
else:
return str(self.getData(index))
elif role == Qt.EditRole:
return str(self.getData(index))
elif role == Qt.DecorationRole:
if self.getData(index) in self._check_dict:
return get_std_icon(self.yes_bt)
else:
return get_std_icon(self.no_bt)
class CheckDictEditModel(CheckDictModel, EditListModel):
"""An editable List-Model
Parameters
----------
data : list()
list with content to be displayed, defaults to empty list
check_dict : dict()
dictionary which may contain items from data as keys
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
yes_bt: str
Supply the name for a qt-standard-icon to mark the items existing in check_dict
no_bt: str
Supply the name for a qt-standard-icon to mark the items not existing in check_dict
Notes
-----
Names for QT standard-icons:
https://doc.qt.io/qt-5/qstyle.html#StandardPixmap-enum
"""
def __init__(self, data, check_dict, show_index=False, drag_drop=False,
yes_bt=None, no_bt=None):
super().__init__(data, check_dict, show_index, drag_drop, yes_bt, no_bt)
# EditListModel doesn't have to be initialized because in __init__ of EditListModel
# only BaseListModel is initialized which is already done in __init__ of CheckDictModel
class BaseDictModel(QAbstractTableModel):
"""Basic Model for Dictonaries
Parameters
----------
data : dict | OrderedDict | None
Dictionary with keys and values to be displayed, default to empty Dictionary
Notes
-----
Python 3.7 is required to ensure order in dictionary when inserting a normal dict (or use OrderedDict)
"""
def __init__(self, data=None, **kwargs):
super().__init__(**kwargs)
if data is None:
self._data = dict()
else:
self._data = data
def getData(self, index=QModelIndex()):
try:
if index.column() == 0:
return list(self._data.keys())[index.row()]
elif index.column() == 1:
return list(self._data.values())[index.row()]
# Happens when a duplicate key is entered
except IndexError:
self.layoutChanged.emit()
return ''
def data(self, index, role=None):
if role == Qt.DisplayRole or role == Qt.EditRole:
return str(self.getData(index))
def headerData(self, idx, orientation, role=None):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
if idx == 0:
return 'Key'
elif idx == 1:
return 'Value'
elif orientation == Qt.Vertical:
return str(idx)
def rowCount(self, index=QModelIndex()):
return len(self._data)
def columnCount(self, index=QModelIndex()):
return 2
class EditDictModel(BaseDictModel):
"""An editable model for Dictionaries
Parameters
----------
data : dict | OrderedDict | None
Dictionary with keys and values to be displayed, default to empty Dictionary
only_edit : 'keys' | 'values' | None
Makes only keys or only values editable. Both are editable if None.
Notes
-----
Python 3.7 is required to ensure order in dictionary when inserting a normal dict (or use OrderedDict)
"""
def __init__(self, data=None, only_edit=None, **kwargs):
super().__init__(data, **kwargs)
self.only_edit = only_edit
def setData(self, index, value, role=None):
if role == Qt.EditRole:
try:
value = literal_eval(value)
except (SyntaxError, ValueError):
pass
if index.column() == 0:
self._data[value] = self._data.pop(list(self._data.keys())[index.row()])
elif index.column() == 1:
self._data[list(self._data.keys())[index.row()]] = value
else:
return False
self.dataChanged.emit(index, index, [role])
return True
return False
def flags(self, index=QModelIndex()):
if not self.only_edit:
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
elif index.column() == 0 and self.only_edit == 'keys':
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
elif index.column() == 1 and self.only_edit == 'values':
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
else:
return QAbstractItemModel.flags(self, index)
def insertRows(self, row, count, index=QModelIndex()):
self.beginInsertRows(index, row, row + count - 1)
for n in range(count):
key_name = f'__new{n}__'
while key_name in self._data.keys():
n += 1
key_name = f'__new{n}__'
self._data[key_name] = ''
self.endInsertRows()
return True
def removeRows(self, row, count, index=QModelIndex()):
self.beginRemoveRows(index, row, row + count - 1)
for n in range(count):
self._data.pop(list(self._data.keys())[row + n])
self.endRemoveRows()
return True
class BasePandasModel(QAbstractTableModel):
"""Basic Model for pandas DataFrame
Parameters
----------
data : pandas.DataFrame | None
pandas DataFrame with contents to be displayed, defaults to empty DataFrame
"""
def __init__(self, data=None, **kwargs):
super().__init__(**kwargs)
if data is None:
self._data = pd.DataFrame([])
else:
self._data = data
def getData(self, index=QModelIndex()):
return self._data.iloc[index.row(), index.column()]
def data(self, index, role=None):
if role == Qt.DisplayRole or role == Qt.EditRole:
return str(self.getData(index))
def headerData(self, idx, orientation, role=None):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return str(self._data.columns[idx])
elif orientation == Qt.Vertical:
return str(self._data.index[idx])
def rowCount(self, index=QModelIndex()):
return len(self._data.index)
def columnCount(self, index=QModelIndex()):
return len(self._data.columns)
class EditPandasModel(BasePandasModel):
""" Editable TableModel for Pandas DataFrames
Parameters
----------
data : pandas.DataFrame | None
pandas DataFrame with contents to be displayed, defaults to empty DataFrame
Notes
-----
The reference to the original input DataFrame is lost when it is edited by this Model;
you need to retrieve it directly from the model after editing.
"""
def __init__(self, data=None, **kwargs):
super().__init__(data, **kwargs)
def setData(self, index, value, role=None):
if role == Qt.EditRole:
try:
value = literal_eval(value)
# List or Dictionary not allowed here as PandasDataFrame-Item
if isinstance(value, dict) or isinstance(value, list):
value = str(value)
except (SyntaxError, ValueError):
pass
self._data.iloc[index.row(), index.column()] = value
self.dataChanged.emit(index, index, [role])
return True
return False
def setHeaderData(self, index, orientation, value, role=Qt.EditRole):
if role == Qt.EditRole:
if orientation == Qt.Vertical:
# DataFrame.rename does rename all duplicate indices if existent,
# that's why the index is reassigned directly
new_index = list(self._data.index)
new_index[index] = value
self._data.index = new_index
self.headerDataChanged.emit(Qt.Vertical, index, index)
return True
elif orientation == Qt.Horizontal:
# DataFrame.rename does rename all duplicate columns if existent,
# that's why the columns are reassigned directly
new_columns = list(self._data.columns)
new_columns[index] = value
self._data.columns = new_columns
self.headerDataChanged.emit(Qt.Horizontal, index, index)
return True
return False
def flags(self, index=QModelIndex()):
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
def insertRows(self, row, count, index=QModelIndex()):
self.beginInsertRows(index, row, row + count - 1)
add_data = pd.DataFrame(columns=self._data.columns, index=[r for r in range(count)])
if row == 0:
self._data = pd.concat([add_data, self._data])
elif row == len(self._data.index):
self._data = self._data.append(add_data)
else:
self._data = pd.concat([self._data.iloc[:row], add_data, self._data.iloc[row:]])
self.endInsertRows()
return True
def insertColumns(self, column, count, index=QModelIndex()):
self.beginInsertColumns(index, column, column + count - 1)
add_data = pd.DataFrame(index=self._data.index, columns=[c for c in range(count)])
if column == 0:
self._data = pd.concat([add_data, self._data], axis=1)
elif column == len(self._data.columns):
self._data =
|
pd.concat([self._data, add_data], axis=1)
|
pandas.concat
|
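Here `pd.concat(..., axis=1)` appends newly created (empty) columns to the right of the existing frame. A compact sketch with toy data:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
add_data = pd.DataFrame(index=df.index, columns=[0, 1])   # two empty columns on the same index
wider = pd.concat([df, add_data], axis=1)                  # column order: A, B, 0, 1
print(wider.columns.tolist())
```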
import numpy as np
import pandas as pd
import sidetable
import plotly.graph_objects as go
import plotly_express as px
from plotly.subplots import make_subplots
import matplotlib.pyplot as plt
import seaborn as sns
from clean_data.cleaner import Cleaner
from do_data.getter import Reader
from do_data.writer import Writer
from do_data.config import Columns
name = Columns()
# from pandasgui import show
from analyze_data.colors import Colors
from scipy import stats
import networkx as nx
from collections import Counter
import math
from sklearn import preprocessing
colors = Colors()
class Network():
def __init__(self):
self.G = nx.DiGraph()
self.hubs = None
self.blue = '#1f77b4' # muted blue
self.orange = '#ff7f0e' # safety orange
self.green = '#2ca02c' # cooked asparagus green
self.red = '#d62728' # brick red
self.purple = '#9467bd' # muted purple
self.brown = '#8c564b' # chestnut brown
self.pink = '#e377c2' # raspberry yogurt pink
self.gray = '#7f7f7f' # middle gray
self.yellow_green = '#bcbd22' # curry yellow-green
self.teal = '#17becf'
self.sentence_color = {
'Prison': self.red
, 'Conversion': self.blue
, 'Probation': self.blue
, 'Jail': self.red
, 'Conditional Discharge': self.green
, 'Supervision': self.blue
, 'Cook County Boot Camp': self.green
, 'Probation Terminated Satisfactorily': self.green
, 'Inpatient Mental Health Services': self.blue
, 'Death': self.red
, 'Conditional Release': self.green
, 'Probation Terminated Instanter': self.orange
, 'Probation Terminated Unsatisfactorily': self.orange
, '2nd Chance Probation': self.orange
}
def ingest_df(self, df, filename):
self.G.clear()
judge_df = df.groupby(name.sentence_judge)
edges = []
counter = 0
# df['sentence_color'] = df[name.sentence_type].apply(lambda x: self.sentence_color[x])
df['sentence_color'] = df[name.sentence_type].cat.codes
scaler = preprocessing.MinMaxScaler(feature_range=(.5, 15.))
# https://towardsdatascience.com/data-normalization-with-pandas-and-scikit-learn-7c1cc6ed6475
df['scaled_commitment_days'] = pd.DataFrame(scaler.fit_transform(df[[name.commitment_days]]))
self.hubs = list(df[name.sentence_court_name].unique())
sentence_types = list(df[name.sentence_type].unique())
sentence_facs = list(df[name.sentence_court_facility].unique())
mean_scaled_commitment_days = []
self.hubs.extend(sentence_types)
self.hubs.extend(sentence_facs)
self.hubs = list(set(self.hubs))
self.hubs.remove(np.nan)
self.hubs.sort()
for df1_name, df1 in judge_df:
counter +=1
if len(df1) > 0:
n_cases = len(df1)
node_record = tuple((df1_name, {'n_cases':n_cases}))
self.G.add_nodes_from([node_record])
court_df = df1.groupby(name.sentence_court_name)
for df2_name, df2 in court_df:
df2[name.sentence_court_facility] = df2[name.sentence_court_facility].cat.remove_unused_categories()
if len(df2) > 0:
n_cases = len(df2)
node_record = tuple((df2_name, {'n_cases': n_cases}))
self.G.add_nodes_from([node_record])
fac_df = df2.groupby(name.sentence_court_facility)
for df3_name, df3 in fac_df:
df3[name.sentence_court_name] = df2[name.sentence_court_name].cat.remove_unused_categories()
if len(df3) > 0:
# nx.add_path(self.G, [df1_name, df3_name, df2_name])
sentence_df = df3.groupby(name.sentence_type)
n_cases = len(df3)
node_record = tuple((df3_name, {'n_cases': n_cases}))
self.G.add_nodes_from([node_record])
for df4_name, df4 in sentence_df:
df4[name.sentence_type] = df4[name.sentence_type].cat.remove_unused_categories()
if len(df4) > 0:
n_cases = len(df4)
node_record = tuple((df4_name, {'n_cases': n_cases}))
self.G.add_nodes_from([node_record])
color = df4['sentence_color'].unique()[0]
msc_day = df4['scaled_commitment_days'].fillna(0).median()
n_cases = len(df4)
nx.add_path(self.G, [df1_name, df3_name, df2_name, df4_name]
, color=color
, label=df4_name
, msc_day=msc_day
, judge=df1_name
, n_cases=n_cases
)
if filename:
saved_name = str('data/'+filename+'.gpickle')
nx.write_gpickle(self.G, saved_name, protocol=2)
def graph_network(self, df=None, filename=None):
if filename:
read_name = str('data/'+filename+'.gpickle')
self.G = nx.read_gpickle(read_name)
if df is not None:
self.hubs = list(df[name.sentence_court_name].unique())
self.types = list(set(df[name.sentence_type].unique()))
sentence_facs = list(df[name.sentence_court_facility].unique())
mean_scaled_commitment_days = []
# self.hubs.extend(sentence_types)
# self.hubs.extend(sentence_facs)
self.hubs = list(set(self.hubs))
self.hubs.remove(np.nan)
self.hubs.sort()
self.judges = list(set(df[name.sentence_judge].unique()))
self.judges.remove(np.nan)
d = dict(self.G.degree)
pos = nx.spring_layout(self.G, k=5 / math.sqrt(self.G.order()), seed=0)
node_values = np.array([v for v in d.values()]).reshape(-1, 1)
scaler = preprocessing.MinMaxScaler(feature_range=(5, 30))
scaled_node_values = scaler.fit_transform(node_values)
vmin = min(scaled_node_values)
vmax = max(scaled_node_values)
# colors = [self.G[u][v]['color'] for u, v in self.G.edges()]
edge_values = [self.G[n1][n2]['color'] for n1, n2 in self.G.edges()]
edge_vmin = min(edge_values)
edge_vmax = max(edge_values)
plt.figure(figsize=(10, 10))
labels = {}
for node in self.G.nodes():
if node in self.hubs:
labels[node] = node
nx.draw_networkx_nodes(self.G
, cmap=plt.get_cmap('coolwarm')
, node_color=scaled_node_values
, pos=pos
, vmin=vmin
, vmax=vmax
, node_size=scaled_node_values
, label=False
)
edge_widths=[self.G[n1][n2]['msc_day'] for n1, n2 in self.G.edges()]
nx.draw_networkx_edges(self.G
, edge_cmap=plt.get_cmap('tab20')
, pos=pos
, width=edge_widths
, alpha =.8
, edge_vmin=edge_vmin
, edge_vmax=edge_vmax
, edge_color=edge_values
)
edge_labels = nx.get_edge_attributes(self.G, 'label')
# nx.draw_networkx_edge_labels(self.G
# , pos=pos
# , edge_labels=edge_labels
# , font_size=4
# )
nx.draw_networkx_labels(self.G
, pos=pos
, labels=labels
, font_size=8
, font_color='black'
, font_weight='medium'
)
# plt.show()
return self.G, pos, edge_widths, scaled_node_values, self.hubs, self.judges, self.types
def make_network(self):
# neo 4 j
# https://stackoverflow.com/questions/44611449/cannot-create-a-directed-graph-using-from-pandas-dataframe-from-networkx
# types https://networkx.org/documentation/stable//reference/drawing.html
"""
https://stackoverflow.com/questions/45350222/select-nodes-and-edges-form-networkx-graph-with-attributes
selected_nodes = [n for n, v in G.nodes(data=True) if v['ej_status'] == 'EM']
print(selected_nodes)
https://stackoverflow.com/questions/13517614/draw-different-color-for-nodes-in-networkx-based-on-their-node-value
"""
G = nx.DiGraph()
H = nx.DiGraph()
df = pd.read_csv('data/testing.csv', index_col=0)
df = df[df['ir'] != 'NOT ASSIGNED.']
df = df.dropna(subset=['ir'])
df = df[(df['detainee_status_date']=='2020-07-15') | (df['detainee_status_date']=='2020-06-30')]
df['detainee_status_date'] =
|
pd.to_datetime(df['detainee_status_date'])
|
pandas.to_datetime
|
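The completion converts a string date column to `datetime64[ns]` with `pd.to_datetime` so it can be filtered and sorted chronologically. A minimal example with invented dates:

```python
import pandas as pd

df = pd.DataFrame({"detainee_status_date": ["2020-06-30", "2020-07-15"]})
df["detainee_status_date"] = pd.to_datetime(df["detainee_status_date"])
print(df.dtypes)   # detainee_status_date -> datetime64[ns]
```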
import pandas as pd
import plotly.graph_objects as go
import dash_table
from plotlydash.views.helpers import *
def create_df():
df_node =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
See also: test_reindex.py:TestReindexSetIndex
"""
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
class TestSetIndex:
def test_set_index_multiindex(self):
# segfault in GH#3308
d = {"t1": [2, 2.5, 3], "t2": [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df["tuples"] = tuples
index = MultiIndex.from_tuples(df["tuples"])
# it works!
df.set_index(index)
def test_set_index_empty_column(self):
# GH#1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=["a", "m", "p", "x"],
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_index_empty_dataframe(self):
# GH#38419
df1 = DataFrame(
{"a": Series(dtype="datetime64[ns]"), "b": Series(dtype="int64"), "c": []}
)
df2 = df1.set_index(["a", "b"])
result = df2.index.to_frame().dtypes
expected = df1[["a", "b"]].dtypes
tm.assert_series_equal(result, expected)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_timezone(self):
# GH#12358
# tz-aware Series should retain the tz
idx = DatetimeIndex(["2014-01-01 10:10:10"], tz="UTC").tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]},
index=Index(di, name="index"),
)
exp.index = exp.index._with_freq(None)
tm.assert_frame_equal(res, exp)
# GH#12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
return_value = result.set_index(keys, drop=drop, inplace=True)
assert return_value is None
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH#1590
df = DataFrame({"val": [0, 1, 2], "key": ["a", "b", "c"]})
expected = DataFrame({"val": [1, 2]},
|
Index(["b", "c"], name="key")
|
pandas.Index
|
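The completion supplies a named `pd.Index` as the index of the expected frame. A tiny sketch of the same construction:

```python
import pandas as pd

expected = pd.DataFrame({"val": [1, 2]}, index=pd.Index(["b", "c"], name="key"))
print(expected.index.name)   # "key"
```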
import torch
import numpy as np
import pandas as pd
import os
import sys
from torchsummary import summary
import torch.nn as nn
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
from matplotlib import cm
import seaborn as sns
sns.set(
font_scale=1.5,
style="whitegrid",
rc={
'text.usetex' : False,
'lines.linewidth': 2
}
)
# sns.set_theme()
# sns.set_style('whitegrid')
import glob
import copy
import math
import models
import random
import torch.optim
import torch
import argparse
import utils
from sklearn.linear_model import LogisticRegression
try:
from tqdm import tqdm
except:
def tqdm(x): return x
"""
Plot the data contained in quant (keys: the names of the experiments) against the reference (contained in stats_ref)
dirname: the output directory name
"""
def process_df(quant, dirname, stats_ref=None, args=None, args_model=None, save=True, split=False):
global table_format
col_names = ["experiment", "stat", "set", "layer"]
quant = utils.assert_col_order(quant, col_names, id_vars="var")
keys = list(quant.columns.levels[0].sort_values())
output_root = os.path.join(dirname, f"merge_" + "_".join(keys))
os.makedirs(output_root, exist_ok=True)
idx = pd.IndexSlice
cols_error = idx[:, 'error', :, :]
N_L = len(quant.columns.unique(level="layer")) # number of hidden layers
# errors = quant["error"]
# losses = quant["loss"]
quant.drop("val", axis=1,level="set", inplace=True, errors='ignore')
quant.drop(("test", "loss"), axis=1, inplace=True, errors='ignore')
if save:
quant.to_csv(os.path.join(output_root, 'merge.csv'))
if stats_ref is not None:
stats_ref.to_csv(os.path.join(output_root, 'stats_ref.csv'))
quant.sort_index(axis=1, inplace=True)
quant.loc[:, cols_error] *= 100 # in %
quant.groupby(level=["experiment", "stat", "set"], axis=1, group_keys=False).describe().to_csv(os.path.join(output_root, 'describe.csv'))
quant_ref = None
Ts = { -1: 0, 0: 0, 1: 12.71, 2: 4.303, 3: 3.182, 4: 2.776, 9: 2.262}
# quant.where(quant != 0, 6.1*10**(-5), inplace=True)
if args.yscale == "log":
quant_log = np.log10(quant)
# quant_log.loc[:, Idx['B', "loss", :, 10]]
if stats_ref is not None: # the reference to plot against
N_S = len(stats_ref.columns)
quant_ref_merge = pd.DataFrame()
stats_ref.loc[:, "error"] = stats_ref["error"].values * 100
if "layer" in stats_ref.columns.names:
stats_ref.columns = stats_ref.columns.droplevel('layer')
# confidence intervals for the reference loss
quant_ref = stats_ref.agg(['mean', 'count', 'std'])
quant_ref.loc['se'] = quant_ref.loc['std'] / np.sqrt(quant_ref.loc['count']) # standard error
quant_ref.loc['ci95'] = [ Ts[n-1] * se for (n, se) in zip(quant_ref.loc['count'], quant_ref.loc['se']) ] # 95% CI
if args.yscale == "log":
quant_ref_log = np.log10(stats_ref).agg(['mean', 'count', 'std'])
quant_ref_log.loc['se'] = quant_ref_log.loc['std'] / np.sqrt(quant_ref_log.loc['count'])
quant_ref_log.loc['ci95'] = [ Ts[n-1] * se for (n, se) in zip(quant_ref_log.loc['count'], quant_ref_log.loc['se']) ] # 95% CI
# if args_model is not None:
# else:
xlabels=[str(i) for i in range(N_L)]
logstr = "_log" if args.yscale == "log" else ""
has_ref = quant_ref is not None
# if len(keys) <= 2:
palette=sns.color_palette(n_colors=len(keys))
if not split:
fig, axes = plt.subplots(2, 1, figsize=(4, 8), sharex=False)
# sns.set(font_scale=1,rc={"lines.linewidth":3})
k = 0
# the confidence intervals
df_ci = quant.describe()
df_ci.loc["ymax", :] = [mean + Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci.loc["mean", :], df_ci.loc["std", :], df_ci.loc["count", :])]
df_ci.loc["ymin", :] = [mean - Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci.loc["mean", :], df_ci.loc["std", :], df_ci.loc["count", :])]
#confidence intervals for the log plot
if args.yscale == "log":
df_ci_log = quant_log.describe()
df_ci_log.loc["ymax", :] = [mean + Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci_log.loc["mean", :], df_ci_log.loc["std", :], df_ci_log.loc["count", :])]
df_ci_log.loc["ymin", :] = [mean - Ts[int(n-1)] / np.sqrt(n) * std for (mean, std, n) in zip(df_ci_log.loc["mean", :], df_ci_log.loc["std", :], df_ci_log.loc["count", :])]
#rp.set_axis_labels("layer", "Loss", labelpad=10)
#quant.loc[1, Idx["loss", :, 0]].lineplot(x="layer_ids", y="value", hue="")
for i, stat in enumerate(["loss","error" ]):
for j, setn in enumerate(["train","test"]):
if stat == "loss" and setn=="test":
continue
if stat == "error" and setn=="train":
continue
# axes[k] = rp.axes[j,i]
log_plot = args.yscale == "log" and setn == "train"
if split:
fig, ax = plt.subplots(1, 1, figsize=(4, 4), sharex=False)
else:
ax = axes.flatten()[k]
if log_plot:
df_plot = quant_log.loc[:, Idx[:, stat, setn, :]]
df_ci_plot = df_ci_log
else:
df_plot = quant.loc[:, Idx[:, stat, setn, :]]#.min(axis=0).to_frame(name="value")
df_ci_plot = df_ci
df_plot = pd.melt(df_plot.reset_index(), id_vars="var")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci=None,
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
# linewidth=3.,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
for j, exp in enumerate(keys):
xs =quant.loc[:, Idx[exp, stat, setn, :]].columns.get_level_values('layer').unique()
df_ci_pplot = df_ci_plot.loc[:, Idx[exp, stat, setn, xs]]
ax.fill_between(xs, df_ci_pplot.loc["ymax",:].values, df_ci_pplot.loc["ymin", :].values, color=ax.lines[j].get_color(), alpha=0.3)
# else:
# lp.set_xticklabels(len(xlabels)*[None])
if not split:
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*"ing", stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
if setn == "test":
ax.set_ylim(df_plot["value"].min(), df_plot["value"].max())
if log_plot: # set the axis in power of 10 values
ax.get_yaxis().get_major_formatter().set_useMathText(True)
ax.get_yaxis().set_major_formatter(lambda x, pos: "$10^{" + f"{int(x)}" + "}$")
if has_ref:
# data_ref = quant_ref[stat, setn].reset_index()
if not log_plot:
ax.axline((0,quant_ref[stat, setn][0]), (1, quant_ref[stat, setn][0]), ls=":", zorder=2, c='g') # for the mean
y1 = quant_ref.loc['mean', (stat, setn)] + quant_ref.loc['ci95', (stat, setn)]#quant_ref.loc['std', (stat, setn)] #
y2 = quant_ref.loc['mean', (stat, setn)] - quant_ref.loc['ci95', (stat, setn)] #quant_ref.loc['ci95', (stat, setn)]
ax.axhspan(y1, y2, facecolor='g', alpha=0.5)
else:
ax.axline((0,quant_ref_log[stat, setn][0]), (1, quant_ref_log[stat, setn][0]), ls=":", zorder=2, c='g') # for the mean
y1 = quant_ref_log.loc['mean', (stat, setn)] + quant_ref_log.loc['ci95', (stat, setn)]#quant_ref_log.loc['std', (stat, setn)] #
y2 = quant_ref_log.loc['mean', (stat, setn)] - quant_ref_log.loc['ci95', (stat, setn)] #quant_ref_log.loc['ci95', (stat, setn)]
ax.axhspan(y1, y2, facecolor='g', alpha=0.5)
# data_ref.index = pd.Index(range(len(data_ref)))
# ax=ax,
# if setn == "train":
# ax.set_yscale(args.yscale)
if split:
# if k == 1:
labels=keys + has_ref*["ref."]
if setn == "test": # reset the name (not log)
logstr = ""
fig.legend(handles=ax.lines, labels=labels,
# title="Exp.",
loc="upper right", borderaxespad=0, bbox_to_anchor=(0.9,0.9))#, bbox_transform=fig.transFigure)
# fig.tight_layout()
plt.margins()
plt.savefig(fname=os.path.join(output_root, f"{setn}_{stat}{logstr}.pdf"), bbox_inches='tight')
k += 1
# fig.subplots_adjust(top=0.85)
# if is_vgg:
if not split:
labels=keys + has_ref*["ref."]
fig.legend(handles=ax.lines, labels=labels,
# title="Exp.",
loc="upper right", borderaxespad=0, bbox_to_anchor=(0.9,0.9))#, bbox_transform=fig.transFigure)
fig.tight_layout()
# plt.margins()
fig.savefig(fname=os.path.join(output_root, f"train_loss_test_error{logstr}.pdf"), bbox_inches='tight')
k=0
# sns.set(font_scale=1,rc={"lines.linewidth":3})
fig, axes = plt.subplots(1, 1, figsize=(4, 4), sharex=False)
for i, stat in enumerate(["error"]):
for j, setn in enumerate(["train"]):
if stat == "loss" and setn=="test":
continue
if stat=="error" and setn=="test":
continue
# axes[k] = rp.axes[j,i]
ax = axes
# df_plot = quant.loc[:, Idx[:, stat, setn, :]].min(axis=0).to_frame(name="value")
df_plot = quant.loc[:, Idx[:, stat, setn, :]]#.min(axis=0).to_frame(name="value")
df_plot = pd.melt(df_plot.reset_index(), id_vars="var")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci=95,
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
# rp.axes[0,1].locator_params(axis='x', nbins=len(xlabels))
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
if not split:
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*'ing', stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
if setn == "train":
ax.set_yscale(args.yscale)
if quant_ref is not None:
# data_ref = quant_ref[stat, setn].reset_index()
ax.axline((0,quant_ref[stat, setn][0]), (1,quant_ref[stat, setn][0]), ls=":", zorder=2, c='g')
# data_ref.index = pd.Index(range(len(data_ref)))
# sns.lineplot(
# data=data_ref, # repeat the datasaet N_L times
# ax=ax,
# # x=range(len(data_ref)),
# # y="value",
# # xc np.tile(np.linspace(1, N_L, num=N_L), 2),
# # x='',
# # hue='r',
# # color='red',
# palette=['red'],
# # style='set',
# # x='index',
# # dashes=True,
# legend=False,
# # y="value"
# )
# for ax in ax.lines[-1:]: # the last two
# ax.set_linestyle('--')
k += 1
# fig.subplots_adjust(top=0.85)
# if is_vgg:
labels=keys + ["ref."]
fig.legend(handles=ax.lines, labels=keys,
#title="Exp.",
loc="upper right", bbox_to_anchor=(0.9,0.9),borderaxespad=0)#, bbox_transform=fig.transFigure)
plt.margins()
plt.savefig(fname=os.path.join(output_root, f"error_train{logstr}.pdf"), bbox_inches='tight')
if "B" in keys:
df_B = quant["B"]
elif "B2" in keys:
df_B = quant["B2"]
else:
return
n_draws = len(df_B.index)
# vary_draw=copy.deepcopy(df_B)
df_B_plot = pd.melt(df_B.reset_index(), id_vars="var")
cp = sns.FacetGrid(
data=df_B_plot,
# hue="experiment",
# hue_order=["A", "B"],
col="stat",
col_order=["loss", "error"],
row="set",
row_order=["train", "test"],
# x="layer",
# y="value",
# kind='line',
# legend="full",
# style='set',
# ci='sd',
# palette=palette,
#style='layer',
# markers=False,
# dashes=True,
#legend_out=True,
# facet_kws={
sharey= False,
sharex= True,
#y="value",
)
styles=['dotted', 'dashed', 'dashdot', 'solid']
# for i_k, k in enumerate([10, 50, 100, 200]):
draws = len(df_B.index)
df_bound =
|
pd.DataFrame(columns=df_B.columns)
|
pandas.DataFrame
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from scipy.interpolate import interp1d
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# -------------------------------------------------------------------------------- 5.1 Approximation Demand and Supply
# ---------- Demand and Supply Functions ----------
def demand(p):
"""Vectorized Function to determine *demand*.
Args:
p (np.array): Price vector for demand.
Raises:
ValueError: Argument p has to be an array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns demand quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
r = np.random.rand() * 2
n = abs(np.random.randn()) * 2
q = (
40 / (p + n)
+ 1 / (1 + np.exp(p - 75 + r))
+ 2 / (1 + np.exp(p - 50 + r))
+ 3 / (1 + np.exp(p - 25 + r))
)
q[q > 20] = np.nan
assert type(q) == type(p), "Type of output does not equal type of input!"
return q
def supply(p):
"""Vectorized Function to determine *supply.*
Args:
p (np.array): Price vector for supply.
    Raises:
        TypeError: Price vector p has to be a numpy array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns supply quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
q = np.zeros(p.shape)
for i, c in enumerate(p):
if (c > 0) and (c < 10):
q[i] = 1.0
elif (c >= 10) and (c < 20):
q[i] = 1.5
elif (c >= 20) and (c < 25):
q[i] = 3.0
elif (c >= 25) and (c < 35):
q[i] = 3.6
elif (c >= 35) and (c < 45):
q[i] = 4.2
elif (c >= 45) and (c < 60):
q[i] = 5.0
elif (c >= 60) and (c < 75):
q[i] = 8.0
elif (c >= 75) and (c < 85):
q[i] = 12.0
elif (c >= 85) and (c < 90):
q[i] = 16.5
elif (c >= 90) and (c < 95):
q[i] = 18.5
elif c >= 95:
q[i] = 20.0
    assert type(q) == type(p), "Type of output does not equal type of input!"
return q
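# Illustrative usage sketch (not part of the original script): evaluating the two
# benchmark functions above on a shared price grid. The grid bounds and node count
# below are arbitrary choices for illustration only.
def _demo_demand_supply():
    prices = np.linspace(10, 100, 25)  # strictly positive prices within the assumed [0, 100] range
    qd = demand(prices)                # noisy demand; quantities above 20 are set to NaN
    qs = supply(prices)                # stepwise supply schedule
    return prices, qd, qs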
# ---------- Approximation using scipy ----------
class PolynomialDS:
"""Object that approximates supply and demand functions using sicpy
interpolate method.
Args:
a (int): Lower bound of prices.
b (int): Upper bound of prices.
nodes (int): Interpolation nodes for demand and supply.
        demand (function): Benchmark function demand.
        supply (function): Benchmark function supply.
Raises:
AssertionError: Price must be non-negative.
AssertionError: By Assumption: price cannot exceed 100.
"""
def __init__(self, a, b, nodes, demand, supply):
"""Constructor method.
"""
self.a = a
self.b = b
assert a >= 0, "Price cannot be negative!"
assert (b > a) and (b <= 100), "By Assumption: Price cannot exceed 100!"
self.nodes = nodes
self.demand = demand
self.supply = supply
self.p = np.linspace(a, b, nodes)
self.qd = demand(self.p)
self.qs = supply(self.p)
def __len__(self):
"""Returns number of interpolation nodes.
Returns:
int: Number of known prices.
"""
return len(self.p)
def __repr__(self):
"""String representation of object.
"""
p = np.around(self.p, decimals=2)
qd = np.around(self.qd, decimals=2)
qs = np.around(self.qs, decimals=2)
return f"{len(self)} known values for Demand and Supply:\n\nPrices={p} \n\nDemand={qd} \nSupply={qs}"
def __call__(self, p):
"""Returns true and approximated value of demand and supply for a
given price.
Args:
p (np.array): Price vector.
        Returns:
            str: Comparison of true and approximated demand and supply at price p.
"""
self.apprx_qd = interp1d(self.p, self.qd)
self.apprx_qs = interp1d(self.p, self.qs)
return f"-- Real value -- at price {p}: \n\nDemand = {self.demand(p)} \nSupply = {self.supply(p)} \n\n-- Approximated value -- at price {p}: \n\nDemand = {self.apprx_qd(p)} \nSupply = {self.apprx_qs(p)}"
@staticmethod
def __name__():
"""Returns the name of the object.
"""
return "Demand and Supply Interpolator"
def plt_approx(self, fs=(14, 7), num1=16.1, num2=16.2, num3=16.3, num4=16.4):
"""Plots Approximation and true supply as well as demand.
Args:
fs (tuple, optional): Figuresize. Defaults to (14, 7).
num1 (float, optional): Number first figure. Defaults to 16.1.
num2 (float, optional): Number second figure. Defaults to 16.2.
num3 (float, optional): Number third figure. Defaults to 16.3.
num4 (float, optional): Number fourth figure. Defaults to 16.4.
"""
prices = np.linspace(self.a, self.b, self.nodes * 150)
apprx_qd = self.apprx_qd(prices)
apprx_qs = self.apprx_qs(prices)
qd = self.demand(prices)
qs = self.supply(prices)
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
ax1[0].plot(self.qd, self.p, "o", label="Nodes Demand", color="#4B045D")
ax1[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax1[0].plot(qd, prices, label="Real Demand", alpha=0.7, color="#D98D08")
ax1[0].set_title(f"Figure {num1}: Approximation of Demand")
ax1[0].legend(loc="center right")
ax1[0].grid()
ax1[1].plot(self.qs, self.p, "o", label="Nodes Supply", color="#4B045D")
ax1[1].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax1[1].plot(qs, prices, label="Real Supply", alpha=0.7, color="#67853E")
ax1[1].set_title(f"Figure {num2}: Approximation of Supply")
ax1[1].legend(loc="center right")
ax1[1].grid()
ax2[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax2[0].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax2[0].set_title(f"Figure {num3}: Approximated Demand and Supply")
ax2[0].legend(loc="center right")
ax2[0].grid()
ax2[1].plot(qd, prices, label="Real Demand", color="#D98D08")
ax2[1].plot(qs, prices, label="Real Supply", color="#67853E")
ax2[1].set_title(f"Figure {num4}: True Demand and Supply")
ax2[1].legend(loc="center right")
ax2[1].grid()
plt.show()
abs_error_qd = np.array(abs(qd - apprx_qd))
abs_error_qd = abs_error_qd[~np.isnan(abs_error_qd)]
abs_error_qs = np.array(abs(qs - apprx_qs))
print(
f"Mean Absolute Error: \n\nDemand = {abs_error_qd.mean():.4f} \nSupply = {abs_error_qs.mean():.4f}"
)
def close_intersection(self, nodes=1000000):
"""Returns true and approximated market equilibrium.
Args:
nodes (int, optional): Number of interpolation nodes. Defaults to 1000000.
"""
prices = np.linspace(self.a, self.b, nodes)
f = lambda p: self.demand(p) - self.supply(p)
abs_sd = f(prices)
abs_sd = abs_sd[~np.isnan(abs_sd)]
argmin = abs(abs_sd).argmin()
pe = prices[argmin]
qe_demand = np.around(demand(np.array([pe])), decimals=3)
qe_supply = np.around(supply(np.array([pe])), decimals=3)
g = lambda p: self.apprx_qd(p) - self.apprx_qs(p)
        abs_asd = g(prices)
abs_asd = abs_asd[~np.isnan(abs_asd)]
argmin_a = abs(abs_asd).argmin()
pea = prices[argmin_a]
aqe_demand = np.around(self.apprx_qd(np.array([pea])), decimals=3)
aqe_supply = np.around(self.apprx_qs(np.array([pea])), decimals=3)
print(
f"Equilibrium True (Quantity, Price) \n*** *** *** *** \nDemand: {(qe_demand[0], np.around(pe, decimals=3))} \nSupply: {(qe_supply[0], np.around(pe, decimals=3))}\n"
)
print(
f"Equilibrium Approximation (Quantity, Price) \n*** *** *** *** \nDemand: {(aqe_demand[0], np.around(pea, decimals=3))} \nSupply: {(aqe_supply[0], np.around(pea, decimals=3))}"
)
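# Illustrative usage sketch (not part of the original script): wiring the benchmark
# functions above into the interpolator. The bounds and node count are assumptions.
def _demo_polynomial_ds():
    ds = PolynomialDS(a=10, b=100, nodes=30, demand=demand, supply=supply)
    print(ds(np.array([50.0])))   # builds the interp1d approximations and compares values
    ds.plt_approx()               # plots true vs. approximated demand and supply
    ds.close_intersection()       # reports true vs. approximated market equilibrium
    return ds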
# ---------- Approximation using ML ----------
class AISupplyDemandApprox:
"""Object that approximates supply and demand using various ML methods.
Args:
nodes (int): Number of known nodes.
supply (function): Unknown supply function.
demand (function): Unknown demand function.
a (int, optional): Lower bound of prices. Defaults to 0.
b (int, optional): Upper bound of prices. Defaults to 100.
ts (float, optional): Size of testing data. Defaults to 0.4.
rs (int, optional): Random state. Defaults to 42.
Raises:
AssertionError: Price must be non-negative.
AssertionError: Training data includes nan values.
AssertionError: Testing data includes nan values.
"""
def __init__(self, nodes, supply, demand, a=0, b=100, ts=0.4, rs=42):
"""Constructor method.
"""
assert a >= 0, "Price must be Non Negative!"
p = np.linspace(a, b, nodes)
q = supply(p)
qd = demand(p)
p_train, p_test, q_train, q_test = train_test_split(
p, q, test_size=ts, random_state=rs
)
pd_train, pd_test, qd_train, qd_test = train_test_split(
p, qd, test_size=ts, random_state=rs
)
self.p_train = p_train.reshape(-1, 1) # reshape data
self.p_test = p_test.reshape(-1, 1) # reshape data
self.q_train = q_train.reshape(-1, 1) # reshape data
self.q_test = q_test.reshape(-1, 1) # reshape data
nan_ind = np.argwhere(np.isnan(qd_train)) # select index of nan values
qd_train_mod = np.delete(qd_train, nan_ind) # delete nan index value
pd_train_mod = np.delete(pd_train, nan_ind)
self.pd_train = pd_train_mod.reshape(-1, 1)
self.pd_test = pd_test.reshape(-1, 1)
self.qd_train = qd_train_mod.reshape(-1, 1)
self.qd_test = qd_test.reshape(-1, 1)
        assert not np.isnan(self.pd_train).any(), "There are nan Values!"
        assert not np.isnan(self.pd_test).any(), "There are nan Values!"
@staticmethod
def __name__():
"""Returns name of AISupplyDemandApprox object.
"""
return "Modern-ML Demand and Supply Interpolator"
def plots(
self,
colors=["teal", "yellowgreen", "gold"],
label=["Training Values", "Testing Values"] * 2,
markers=["x", "*", "v"],
n_neighbors=4,
degrees=[3, 6],
weight="distance",
fs=(15, 10),
num1=17.1,
num2=17.2,
num3=17.3,
num4=17.4,
):
"""Plots approximation results as well as training and testing data.
Args:
colors (list, optional): Colors of approximation results. Defaults
to ["teal", "yellowgreen", "gold"].
label (list, optional): Labels of training and testing data.
Defaults to ["Training Values", "Testing Values"]*2.
markers (list, optional): Markers of approximation. Defaults
to ["x", "*", "v"].
n_neighbors (int, optional): Number of k-nearest neighbors. Defaults to 4.
degrees (list, optional): Number of degrees for Linear Regression.
Defaults to [3, 6].
weight (str, optional): Weight of KNN Regression. Defaults to "distance".
fs (tuple, optional): Figuresize. Defaults to (15, 10)
num1 (float, optional): Number of first Figure. Defaults to 17.1.
num2 (float, optional): Number of second Figure. Defaults to 17.2.
num3 (float, optional): Number of third Figure. Defaults to 17.3.
num4 (float, optional): Number of fourth Figure. Defaults to 17.4.
Raises:
AssertionError: Length of degrees is out of range.
"""
self.degrees = degrees
assert len(degrees) == 2, "List out of range!"
qsup, psup = [self.q_train, self.q_test], [self.p_train, self.p_test]
qdem, pdem = [self.qd_train, self.qd_test], [self.pd_train, self.pd_test]
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
for i, (qs, ps, qd, pd) in enumerate(zip(qsup, psup, qdem, pdem)):
for ax in [ax1[0], ax1[1]]:
ax.plot(qs, ps, "o", ms=4, label=label[i])
for ax in [ax2[0], ax2[1]]:
ax.plot(qd, pd, "o", ms=4, label=label[i])
self.maes, self.maed = [], []
self.mses, self.msed = [], []
self.evss, self.evsd = [], []
self.r2s, self.r2d = [], []
for i, ax in enumerate([ax1, ax2]):
for j, d in enumerate(degrees):
model = make_pipeline(PolynomialFeatures(d), LinearRegression())
if i == 0:
model.fit(self.p_train, self.q_train)
pred = model.predict(self.p_test)
ax[i].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.p_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(
f"Figure {num1}: Linear Regression Approximation Supply"
)
ax[i].grid(True)
ax[i].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
model.fit(self.pd_train, self.qd_train)
pred = model.predict(self.pd_test)
ax[i - 1].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i - 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i - 1].set_title(
f"Figure {num3}: Linear Regression Approximation Demand"
)
ax[i - 1].grid(True)
ax[i - 1].legend(loc="center right")
self.maed.append(mean_absolute_error(pred, self.qd_test))
self.msed.append(mean_squared_error(pred, self.qd_test))
self.evsd.append(explained_variance_score(pred, self.qd_test))
self.r2d.append(r2_score(pred, self.qd_test))
methods = ["KNN", "DecisionTree"]
knn = KNeighborsRegressor(n_neighbors, weights=weight)
tree = DecisionTreeRegressor()
for i, ax in enumerate([ax1, ax2]):
for j, m in enumerate([knn, tree]):
if i == 0:
m.fit(self.p_train, self.q_train)
pred = m.predict(self.p_test)
ax[i + 1].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
                    ptest_ordered = self.p_test.ravel()[indexs_to_order_by]
ax[i + 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i + 1].set_title(
f"Figure {num2}: KNN and DT Approximation Supply"
)
ax[i + 1].grid(True)
ax[i + 1].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
m.fit(self.pd_train, self.qd_train)
pred = m.predict(self.pd_test)
ax[i].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(f"Figure {num4}: KNN and DT Approximation Demand")
ax[i].grid(True)
ax[i].legend(loc="center right")
self.maed.append(mean_absolute_error(pred, self.qd_test))
self.msed.append(mean_squared_error(pred, self.qd_test))
self.evsd.append(explained_variance_score(pred, self.qd_test))
self.r2d.append(r2_score(pred, self.qd_test))
plt.show()
def reslts_as_frame(self, num=14):
"""Returns accuracy of approximation using ML.
Args:
num (int, float, optional): Number of dataframe. Defaults to 14.
Returns:
pd.DataFrame: Accuracy of approximation.
"""
d1, d2 = self.degrees[0], self.degrees[1]
index_as_array_sup = [
np.array(["Supply"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
index_as_array_dem = [
np.array(["Demand"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
col = [
"Mean Absolute Error",
"Mean Squared Error",
"Explained Variance Score",
"$R^2$-Score",
]
data_supply = pd.concat(
[
pd.DataFrame(self.maes, index=index_as_array_sup),
pd.DataFrame(self.mses, index=index_as_array_sup),
                pd.DataFrame(self.evss, index=index_as_array_sup),
                pd.DataFrame(self.r2s, index=index_as_array_sup)
|
import pandas as pd
from pathlib import Path
""" This module contains functions to access training, validation and testing data
for the river flow (discharge) of the Dranse river in Bioge, France.
Hourly values for the years 2016, 2017, 2018, and 2019.
"""
def get_yearly_flow(year=2016):
""" Reading text files with yearly data.
:return: A DataFrame with DateTimeIndex and the discharge values
"""
file_path = (Path(__file__).parent / f"../data/hourly_flows_2016-19/flows-{year}.txt").resolve()
flow_df = pd.read_csv(file_path, delimiter='\t').drop(columns='V')
flow_df.columns = ['datetime', 'discharge']
flow_df['datetime'] = pd.to_datetime(flow_df['datetime'], dayfirst=True)
flow_df = flow_df.set_index('datetime')
return flow_df
def get_combined_flow():
""" Combining dataframes from individual years into one dataframe.
:return: a combined dataframe, with interpolated missing values.
"""
dfs = []
for year in range(2016, 2021):
dfs.append(get_yearly_flow(year=year))
flow_df = pd.concat(dfs)
flow_df = flow_df.asfreq('H').interpolate()
return flow_df
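# Example usage (illustrative only; the paths and years depend on the data files
# shipped with the project):
#   df_2016 = get_yearly_flow(year=2016)  # hourly discharge for a single year
#   df_all = get_combined_flow()          # all years, hourly frequency, gaps interpolated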
def get_combined_flow_split():
"""
Splitting the combined data of four years into training, validation and test sets.
:return: three dataframes
"""
flow_df = get_combined_flow()
train = flow_df.loc[flow_df.index < pd.to_datetime('2018-07-01 00:00:00')]
    validation = flow_df.loc[(flow_df.index > pd.to_datetime('2018-07-01 00:00:00')
|
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
import datetime
# from datetime import datetime
dire = '../../data/'
start = datetime.datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
action_train = pd.read_csv(dire + 'train/insert_action_train2.csv', encoding='utf-8')
city = pd.read_csv(dire + 'train/city.csv', encoding='utf-8')
orderHistory_test = pd.read_csv(dire + 'test/orderHistory_test.csv', encoding='utf-8')
orderFuture_test = pd.read_csv(dire + 'test/orderFuture_test.csv', encoding='utf-8')
userProfile_test = pd.read_csv(dire + 'test/userProfile_test.csv', encoding='utf-8')
userComment_test = pd.read_csv(dire + 'test/userComment_test.csv', encoding='utf-8')
action_test = pd.read_csv(dire + 'test/insert_action_test2.csv', encoding='utf-8')
# """
############# 3.action feature_3 #############
"""
# 1. Count of action types 0-9 over the user's full action history
# 2. Count of action types 0-9 over the order-related action history
# 3. Total browsing time over the full action history
# 4. Browsing time over the order-related action history
# 5. Whether action types 5 or 6 appear in the order-related action history
# """
# Count of action types 0-9 over the user's full action history
def count_56789(orderFuture, action):
action_1 = action[action['actionType'] == 1]
action_2 = action[action['actionType'] == 2]
action_3 = action[action['actionType'] == 3]
action_4 = action[action['actionType'] == 4]
action_5 = action[action['actionType'] == 5]
action_6 = action[action['actionType'] == 6]
action_7 = action[action['actionType'] == 7]
action_8 = action[action['actionType'] == 8]
action_9 = action[action['actionType'] == 9]
    action_1 = action_1.groupby(action_1.userid)['actionType'].count().reset_index()  # total count of action type 1 per user
    action_2 = action_2.groupby(action_2.userid)['actionType'].count().reset_index()  # total count of action type 2 per user
    action_3 = action_3.groupby(action_3.userid)['actionType'].count().reset_index()  # total count of action type 3 per user
    action_4 = action_4.groupby(action_4.userid)['actionType'].count().reset_index()  # total count of action type 4 per user
    action_5 = action_5.groupby(action_5.userid)['actionType'].count().reset_index()  # total count of action type 5 per user
    action_6 = action_6.groupby(action_6.userid)['actionType'].count().reset_index()  # total count of action type 6 per user
    action_7 = action_7.groupby(action_7.userid)['actionType'].count().reset_index()  # total count of action type 7 per user
    action_8 = action_8.groupby(action_8.userid)['actionType'].count().reset_index()  # total count of action type 8 per user
    action_9 = action_9.groupby(action_9.userid)['actionType'].count().reset_index()  # total count of action type 9 per user
    action_all = action.groupby(action.userid)['actionType'].count().reset_index()  # total count of all actions per user
action_1.rename(columns={'actionType': 'action_1'}, inplace=True)
action_2.rename(columns={'actionType': 'action_2'}, inplace=True)
action_3.rename(columns={'actionType': 'action_3'}, inplace=True)
action_4.rename(columns={'actionType': 'action_4'}, inplace=True)
action_5.rename(columns={'actionType': 'action_5'}, inplace=True)
action_6.rename(columns={'actionType': 'action_6'}, inplace=True)
action_7.rename(columns={'actionType': 'action_7'}, inplace=True)
action_8.rename(columns={'actionType': 'action_8'}, inplace=True)
action_9.rename(columns={'actionType': 'action_9'}, inplace=True)
action_all.rename(columns={'actionType': 'action_all'}, inplace=True)
orderFuture = pd.merge(orderFuture, action_1, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_2, on='userid', how='left')
    orderFuture = pd.merge(orderFuture, action_3, on='userid', how='left')
|
import os
from loguru import logger
import googlemaps
import pandas as pd
geo = os.getenv("LTC_FACILITIES_GEOCODED_CSV")
if geo is None:
raise ValueError("you must set a value for the LTC_FACILITIES_GEOCODED_CSV env variable")
ltc_geo = pd.read_csv(geo)
google_key = os.getenv("GOOGLE_API_KEY")
if google_key is None:
raise ValueError("you must set a value for the GOOGLE_API_KEY env variable")
gmaps = googlemaps.Client(key=google_key)
# hits the google reverse geocoding api to find an address from lat/lon for each facility
# used for states that do not report city and county per facility
def rev_geocode(record):
if(record['lat'] == '' or record['lon'] == ''):
return record
if(record['geocoded_city'] != '' or record['geocoded_county'] != ''):
return record
latlon = (record['lat'],record['lon'])
try:
result = gmaps.reverse_geocode(latlon)
except Exception as err:
logger.error("geocode call failed for facility %s with error: %s" % (record['facility_name'], err))
return record
if not result:
logger.error("could not find geocode result for facility %s" % record['facility_name'])
return record
g = result[0]
if not 'address_components' in g:
logger.error("could not find reverse geocode results for facility %s" % record['facility_name'])
return record
address_components = g.get('address_components')
city_results = [a for a in address_components if a["types"] == ['locality', 'political']]
city = city_results[0] if city_results else {}
county_results = [a for a in address_components if a["types"] == ['administrative_area_level_2', 'political']]
county = county_results[0] if county_results else {}
record['geocoded_city'] = city.get('long_name')
record['geocoded_county'] = county.get('long_name')
return record
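# Illustrative sketch (not part of the original script): how the list-comprehension
# filters in rev_geocode above pick the city and county out of a reverse-geocode
# payload. The payload below is hand-written sample data, not real Google API output.
def _demo_address_component_filter():
    address_components = [
        {"long_name": "Springfield", "types": ["locality", "political"]},
        {"long_name": "Sangamon County", "types": ["administrative_area_level_2", "political"]},
    ]
    city = [a for a in address_components if a["types"] == ["locality", "political"]]
    county = [a for a in address_components if a["types"] == ["administrative_area_level_2", "political"]]
    return city[0]["long_name"], county[0]["long_name"]  # ("Springfield", "Sangamon County")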
def main():
# these states do not provide facility location information, so we wish to capture city and county during geocoding
rev_states =['NM', 'UT', 'IN', 'SC', 'OK', 'VT']
df = ltc_geo
df = df.fillna(value='')
to_rev = df[df['state'].isin(rev_states)]
rev = to_rev.apply(rev_geocode, axis = 1)
not_rev = df[~df['state'].isin(rev_states)]
frames = [rev, not_rev]
    df = pd.concat(frames)
|
"""
steep.py
Computes similarity (Pearson or Spearman correlation).
If given 1 gct, steep will compute all pairwise similarities between its columns.
If given 2 gcts, steep will compute pairwise similarities between the columns of
gct1 and the columns of gct2 (samples from gct1 will be in the rows).
Required input is a path to a gct file. Output is a gct file containing a
similarity matrix.
"""
import sys
import logging
import pandas as pd
import argparse
import merino.setup_logger as setup_logger
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.parse as parse
import cmapPy.pandasGEXpress.write_gct as wg
__author__ = "<NAME>"
__email__ = "<EMAIL>"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
SIMILARITY_METRIC_FIELD = "similarity_metric"
def build_parser():
"""Build argument parser."""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Required args
parser.add_argument("--in_gct_path", "-i", required=True, help="path to input gct file")
# Optional args
parser.add_argument("--in_gct2_path", "-i2", help="path to second gct file")
parser.add_argument("--out_name", "-o", default="steep_output.gct",
help="what to name the output similarity file")
parser.add_argument("--similarity_metric", "-s", default="spearman",
choices=["spearman", "pearson"],
help="similarity metric to use for comparing columns")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="whether to increase the # of messages reported")
return parser
def main(args):
# Read in the first gct
gct1 = parse(args.in_gct_path, convert_neg_666=False, make_multiindex=True)
# If second gct provided, compute similarity between 2 gcts
if args.in_gct2_path is not None:
logger.info("in_gct2_path was provided. Will compute pairwise similarities " +
"between the columns of in_gct and in_gct2.")
# Read in the second gct
gct2 = parse(args.in_gct2_path, convert_neg_666=False, make_multiindex=True)
# Compute similarities between gct1 and gct2
out_df = compute_similarity_bw_two_dfs(gct1.data_df, gct2.data_df, args.similarity_metric)
# Row metadata is from gct1, column metadata is from gct2
row_metadata_df = gct1.col_metadata_df
col_metadata_df = gct2.col_metadata_df
# Append column to both metadata_dfs indicating which similarity_metric was used
row_metadata_df[SIMILARITY_METRIC_FIELD] = args.similarity_metric
col_metadata_df[SIMILARITY_METRIC_FIELD] = args.similarity_metric
# Assemble output gct
out_gct = GCToo.GCToo(out_df, row_metadata_df, col_metadata_df)
# If only 1 gct provided, compute similarities between the columns of gct1
else:
out_df = compute_similarity_within_df(gct1.data_df, args.similarity_metric)
# Row and column metadata are both from gct1
metadata_df = gct1.col_metadata_df
# Append column to metadata_df indicating which similarity_metric was used
metadata_df[SIMILARITY_METRIC_FIELD] = args.similarity_metric
# Assemble output gct
out_gct = GCToo.GCToo(out_df, metadata_df, metadata_df)
# Write output gct
wg.write(out_gct, args.out_name, data_null="NaN", metadata_null="NA", filler_null="NA")
def compute_similarity_bw_two_dfs(df1, df2, similarity_metric):
""" Compute similarity between the columns of df1 and the columns of df2.
The dfs are concated, all pairwise similarities are computed, and then only
the requested ones (namely between df1 and df2, not within df1 or within
df2) are returned. This can almost certainly be implemented more
efficiently, but this method is faster than iterating over columns with a
for-loop.
Args:
df1 (pandas df): size = m x n1
df2 (pandas df): size = m x n2
similarity_metric (string): "pearson" or "spearman"
Returns:
out_df (pandas df): size = n1 x n2
"""
# Concatenate the matrices together
    df_concat = pd.concat([df1, df2], axis=1)
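    # The original implementation is truncated here. A sketch of the remaining steps
    # described in the docstring (an assumption, not the author's code) could be:
    #   full_corr = df_concat.corr(method=similarity_metric)
    #   out_df = full_corr.iloc[:df1.shape[1], df1.shape[1]:]
    #   return out_df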
|
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from sklearn.inspection import partial_dependence
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn import svm
from sklearn.datasets import load_boston
from articles.pd.support import load_rent, load_bulldozer, load_flights, \
toy_weather_data, toy_weight_data, \
df_cat_to_catcode, df_split_dates, \
df_string_to_cat, synthetic_interaction_data
from stratx import plot_stratpd, plot_catstratpd, \
plot_stratpd_gridsearch, plot_catstratpd_gridsearch
from stratx.partdep import partial_dependence
from stratx.plot import marginal_plot_, plot_ice, plot_catice
from stratx.ice import predict_ice, predict_catice, friedman_partial_dependence
import inspect
import matplotlib.patches as mpatches
from collections import OrderedDict
import matplotlib.pyplot as plt
import os
import shap
import xgboost as xgb
from colour import rgb2hex, Color
from dtreeviz.trees import tree, ShadowDecTree
figsize = (2.5, 2)
figsize2 = (3.8, 3.2)
GREY = '#444443'
# This genfigs.py code is just demonstration code to generate figures for the paper.
# There are lots of programming sins committed here; do not take this to be
# our idea of good code. ;)
# For data sources, please see notebooks/examples.ipynb
def addnoise(df, n=1, c=0.5, prefix=''):
if n == 1:
df[f'{prefix}noise'] = np.random.random(len(df)) * c
return
for i in range(n):
df[f'{prefix}noise{i + 1}'] = np.random.random(len(df)) * c
def fix_missing_num(df, colname):
df[colname + '_na'] = pd.isnull(df[colname])
df[colname].fillna(df[colname].median(), inplace=True)
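# Illustrative sketch (not part of the original demo script): how the two helpers
# above behave on a toy frame. The column name is made up for illustration.
def _demo_noise_and_missing():
    df = pd.DataFrame({'sqft': [700.0, np.nan, 900.0]})
    addnoise(df, n=2, c=0.1)       # adds 'noise1' and 'noise2' columns of small random values
    fix_missing_num(df, 'sqft')    # adds a 'sqft_na' flag column and median-fills 'sqft'
    return df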
def savefig(filename, pad=0):
plt.tight_layout(pad=pad, w_pad=0, h_pad=0)
plt.savefig(f"images/{filename}.pdf", bbox_inches="tight", pad_inches=0)
# plt.savefig(f"images/{filename}.png", dpi=150)
plt.tight_layout()
plt.show()
plt.close()
def rent():
print(f"----------- {inspect.stack()[0][3]} -----------")
np.random.seed(1) # pick seed for reproducible article images
X,y = load_rent(n=10_000)
df_rent = X.copy()
df_rent['price'] = y
colname = 'bedrooms'
colname = 'bathrooms'
TUNE_RF = False
TUNE_XGB = False
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
if TUNE_RF:
rf, bestparams = tune_RF(X, y) # does CV on entire data set to tune
# bedrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 125}
# validation R^2 0.7873724127323822
# bathrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 200}
# validation R^2 0.8066593395345907
else:
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1, max_features=.3,
oob_score=True, n_jobs=-1)
rf.fit(X_train, y_train) # Use training set for plotting
print("RF OOB R^2", rf.oob_score_)
rf_score = rf.score(X_test, y_test)
print("RF validation R^2", rf_score)
if TUNE_XGB:
tuned_parameters = {'n_estimators': [400, 450, 500, 600, 1000],
'learning_rate': [0.008, 0.01, 0.02, 0.05, 0.08, 0.1, 0.11],
'max_depth': [3, 4, 5, 6, 7, 8, 9]}
grid = GridSearchCV(
xgb.XGBRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=2
)
grid.fit(X, y) # does CV on entire data set to tune
print("XGB best:", grid.best_params_)
b = grid.best_estimator_
# bedrooms
# XGB best: {'max_depth': 7, 'n_estimators': 250}
# XGB validation R^2 0.7945797751555217
# bathrooms
# XGB best: {'learning_rate': 0.11, 'max_depth': 6, 'n_estimators': 1000}
# XGB train R^2 0.9834399795800324
# XGB validation R^2 0.8244958014380593
else:
b = xgb.XGBRegressor(n_estimators=1000,
max_depth=6,
learning_rate=.11,
verbose=2,
n_jobs=8)
b.fit(X_train, y_train)
xgb_score = b.score(X_test, y_test)
print("XGB validation R^2", xgb_score)
lm = LinearRegression()
lm.fit(X_train, y_train)
lm_score = lm.score(X_test, y_test)
print("OLS validation R^2", lm_score)
lm.fit(X, y)
model, r2_keras = rent_deep_learning_model(X_train, y_train, X_test, y_test)
fig, axes = plt.subplots(1, 6, figsize=(10, 1.8),
gridspec_kw = {'wspace':0.15})
for i in range(len(axes)):
axes[i].set_xlim(0-.3,4+.3)
axes[i].set_xticks([0,1,2,3,4])
axes[i].set_ylim(1800, 9000)
axes[i].set_yticks([2000,4000,6000,8000])
axes[1].get_yaxis().set_visible(False)
axes[2].get_yaxis().set_visible(False)
axes[3].get_yaxis().set_visible(False)
axes[4].get_yaxis().set_visible(False)
axes[0].set_title("(a) Marginal", fontsize=10)
axes[1].set_title("(b) RF", fontsize=10)
axes[1].text(2,8000, f"$R^2=${rf_score:.3f}", horizontalalignment='center', fontsize=9)
axes[2].set_title("(c) XGBoost", fontsize=10)
axes[2].text(2,8000, f"$R^2=${xgb_score:.3f}", horizontalalignment='center', fontsize=9)
axes[3].set_title("(d) OLS", fontsize=10)
axes[3].text(2,8000, f"$R^2=${lm_score:.3f}", horizontalalignment='center', fontsize=9)
axes[4].set_title("(e) Keras", fontsize=10)
axes[4].text(2,8000, f"$R^2=${r2_keras:.3f}", horizontalalignment='center', fontsize=9)
axes[5].set_title("(f) StratPD", fontsize=10)
avg_per_baths = df_rent.groupby(colname).mean()['price']
axes[0].scatter(df_rent[colname], df_rent['price'], alpha=0.07, s=5)
axes[0].scatter(np.unique(df_rent[colname]), avg_per_baths, s=6, c='black',
label="average price/{colname}")
axes[0].set_ylabel("price") # , fontsize=12)
axes[0].set_xlabel("bathrooms")
axes[0].spines['right'].set_visible(False)
axes[0].spines['top'].set_visible(False)
ice = predict_ice(rf, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[1], show_xlabel=True,
show_ylabel=False)
ice = predict_ice(b, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[2], show_ylabel=False)
ice = predict_ice(lm, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[3], show_ylabel=False)
scaler = StandardScaler()
X_train_ = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
# y_pred = model.predict(X_)
# print("Keras training R^2", r2_score(y, y_pred)) # y_test in y
ice = predict_ice(model, X_train_, colname, 'price', numx=30, nlines=100)
# replace normalized unique X with unnormalized
ice.iloc[0, :] = np.linspace(np.min(X_train[colname]), np.max(X_train[colname]), 30, endpoint=True)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[4], show_ylabel=True)
pdpx, pdpy, ignored = \
plot_stratpd(X, y, colname, 'price', ax=axes[5],
pdp_marker_size=6,
show_x_counts=False,
hide_top_right_axes=False,
show_xlabel=True, show_ylabel=False)
print(f"StratPD ignored {ignored} records")
axes[5].yaxis.tick_right()
axes[5].yaxis.set_label_position('right')
axes[5].set_ylim(-250,2250)
axes[5].set_yticks([0,1000,2000])
axes[5].set_ylabel("price")
savefig(f"{colname}_vs_price")
def tune_RF(X, y, verbose=2):
tuned_parameters = {'n_estimators': [50, 100, 125, 150, 200],
'min_samples_leaf': [1, 3, 5, 7],
'max_features': [.1, .3, .5, .7, .9]}
grid = GridSearchCV(
RandomForestRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=verbose
)
grid.fit(X, y) # does CV on entire data set
rf = grid.best_estimator_
print("RF best:", grid.best_params_)
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# rf.fit(X_train, y_train)
# print("validation R^2", rf.score(X_test, y_test))
return rf, grid.best_params_
def plot_with_noise_col(df, colname):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_noise = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_noise']
type = "noise"
fig, axes = plt.subplots(2, 2, figsize=(5, 5), sharey=True, sharex=True)
df = df.copy()
addnoise(df, n=1, c=50, prefix=colname + '_')
X = df[features]
y = df['price']
# STRATPD ON ROW 1
X = df[features]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15, show_xlabel=True,
show_ylabel=False)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_noise]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15,
show_ylabel=False)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x, pdp_curve = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(2):
axes[i, j].set_xlim(0, 6)
X = df[features_with_noise]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x_, pdp_curve_ = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True,
show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def plot_with_dup_col(df, colname, min_samples_leaf):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_dup = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_dup']
fig, axes = plt.subplots(2, 3, figsize=(7.5, 5), sharey=True, sharex=True)
type = "dup"
verbose = False
df = df.copy()
df[colname + '_dup'] = df[colname]
# df_rent[colname+'_dupdup'] = df_rent[colname]
# STRATPD ON ROW 1
X = df[features]
y = df['price']
print(f"shape is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15,
show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=True,
verbose=verbose)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_dup]
y = df['price']
print(f"shape with dup is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15, show_ylabel=False,
min_samples_leaf=min_samples_leaf,
verbose=verbose)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 2], slope_line_alpha=.15, show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=False,
n_trees=15,
max_features=1,
bootstrap=False,
verbose=verbose
)
axes[0, 2].set_ylim(-1000, 5000)
axes[0, 2].set_title(f"StratPD w/{type} col")
axes[0, 2].text(.2, 4000, "ntrees=15")
axes[0, 2].text(.2, 3500, "max features per split=1")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(3):
axes[i, j].set_xlim(0, 6)
# with dup'd column
X = df[features_with_dup]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True, show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[1, 2].set_title(f"FPD/ICE w/{type} col")
axes[1, 2].text(.2, 4000, "Cannot compensate")
axes[1, 2].set_xlabel(colname)
# print(f"max curve {np.max(curve):.0f}, max curve with dup {np.max(curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def rent_ntrees():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
trees = [1, 5, 10, 30]
supervised = True
def onevar(colname, row, yrange=None):
alphas = [.1,.08,.05,.04]
for i, t in enumerate(trees):
plot_stratpd(X, y, colname, 'price', ax=axes[row, i], slope_line_alpha=alphas[i],
# min_samples_leaf=20,
yrange=yrange,
supervised=supervised,
show_ylabel=t == 1,
pdp_marker_size=2 if row==2 else 8,
n_trees=t,
max_features='auto',
bootstrap=True,
verbose=False)
fig, axes = plt.subplots(3, 4, figsize=(8, 6), sharey=True)
for i in range(1, 4):
axes[0, i].get_yaxis().set_visible(False)
axes[1, i].get_yaxis().set_visible(False)
axes[2, i].get_yaxis().set_visible(False)
for i in range(0, 4):
axes[0, i].set_title(f"{trees[i]} trees")
onevar('bedrooms', row=0, yrange=(-500, 4000))
onevar('bathrooms', row=1, yrange=(-500, 4000))
onevar('latitude', row=2, yrange=(-500, 4000))
savefig(f"rent_ntrees")
plt.close()
def meta_boston():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
boston = load_boston()
print(len(boston.data))
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
X = df.drop('MEDV', axis=1)
y = df['MEDV']
plot_stratpd_gridsearch(X, y, 'AGE', 'MEDV',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
yrange=(-10,10))
# yranges = [(-30, 0), (0, 30), (-8, 8), (-11, 0)]
# for nbins in range(6):
# plot_meta_multivar(X, y, colnames=['LSTAT', 'RM', 'CRIM', 'DIS'], targetname='MEDV',
# nbins=nbins,
# yranges=yranges)
savefig(f"meta_boston_age_medv")
def plot_meta_multivar(X, y, colnames, targetname, nbins, yranges=None):
np.random.seed(1) # pick seed for reproducible article images
min_samples_leaf_values = [2, 5, 10, 30, 50, 100, 200]
nrows = len(colnames)
ncols = len(min_samples_leaf_values)
fig, axes = plt.subplots(nrows, ncols + 2, figsize=((ncols + 2) * 2.5, nrows * 2.5))
if yranges is None:
yranges = [None] * len(colnames)
row = 0
for i, colname in enumerate(colnames):
marginal_plot_(X, y, colname, targetname, ax=axes[row, 0])
col = 2
for msl in min_samples_leaf_values:
print(
f"---------- min_samples_leaf={msl}, nbins={nbins:.2f} ----------- ")
plot_stratpd(X, y, colname, targetname, ax=axes[row, col],
min_samples_leaf=msl,
yrange=yranges[i],
n_trees=1)
axes[row, col].set_title(
f"leafsz={msl}, nbins={nbins:.2f}",
fontsize=9)
col += 1
row += 1
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
row = 0
for i, colname in enumerate(colnames):
ice = predict_ice(rf, X, colname, targetname)
plot_ice(ice, colname, targetname, ax=axes[row, 1])
row += 1
def unsup_rent():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
fig, axes = plt.subplots(4, 2, figsize=(4, 8))
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 0], yrange=(-500,2000),
slope_line_alpha=.2, supervised=False, verbose=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 1], yrange=(-500,2000),
slope_line_alpha=.2, supervised=True, verbose=True)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 0], yrange=(-500,500),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 1], yrange=(-500,500),
slope_line_alpha=.2, supervised=True)
axes[0, 0].set_title("Unsupervised")
axes[0, 1].set_title("Supervised")
for i in range(3):
axes[i, 1].get_yaxis().set_visible(False)
savefig(f"rent_unsup")
plt.close()
def weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
TUNE_RF = False
df_raw = toy_weather_data()
df = df_raw.copy()
df_string_to_cat(df)
names = np.unique(df['state'])
catnames = OrderedDict()
for i,v in enumerate(names):
catnames[i+1] = v
df_cat_to_catcode(df)
X = df.drop('temperature', axis=1)
y = df['temperature']
# cats = catencoders['state'].values
# cats = np.insert(cats, 0, None) # prepend a None for catcode 0
if TUNE_RF:
rf, bestparams = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 5, 'n_estimators': 150}
# validation R^2 0.9500072628270099
else:
rf = RandomForestRegressor(n_estimators=150, min_samples_leaf=5, max_features=0.9, oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize)
df = df_raw.copy()
avgtmp = df.groupby(['state', 'dayofyear'])[['temperature']].mean()
avgtmp = avgtmp.reset_index()
ca = avgtmp.query('state=="CA"')
co = avgtmp.query('state=="CO"')
az = avgtmp.query('state=="AZ"')
wa = avgtmp.query('state=="WA"')
nv = avgtmp.query('state=="NV"')
ax.plot(ca['dayofyear'], ca['temperature'], lw=.5, c='#fdae61', label="CA")
ax.plot(co['dayofyear'], co['temperature'], lw=.5, c='#225ea8', label="CO")
ax.plot(az['dayofyear'], az['temperature'], lw=.5, c='#41b6c4', label="AZ")
ax.plot(wa['dayofyear'], wa['temperature'], lw=.5, c='#a1dab4', label="WA")
ax.plot(nv['dayofyear'], nv['temperature'], lw=.5, c='#a1dab4', label="NV")
ax.legend(loc='upper left', borderpad=0, labelspacing=0)
ax.set_xlabel("dayofyear")
ax.set_ylabel("temperature")
ax.set_title("(a) State/day vs temp")
savefig(f"dayofyear_vs_temp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'dayofyear', 'temperature', ax=ax,
show_x_counts=False,
yrange=(-10, 10),
pdp_marker_size=2, slope_line_alpha=.5, n_trials=1)
ax.set_title("(b) StratPD")
savefig(f"dayofyear_vs_temp_stratpd")
plt.close()
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_catstratpd(X, y, 'state', 'temperature', catnames=catnames,
show_x_counts=False,
# min_samples_leaf=30,
min_y_shifted_to_zero=True,
# alpha=.3,
ax=ax,
yrange=(-1, 55))
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(d) CatStratPD")
savefig(f"state_vs_temp_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_ice(rf, X, 'dayofyear', 'temperature')
plot_ice(ice, 'dayofyear', 'temperature', ax=ax)
ax.set_title("(c) FPD/ICE")
savefig(f"dayofyear_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_catice(rf, X, 'state', 'temperature')
plot_catice(ice, 'state', 'temperature', catnames=catnames, ax=ax,
pdp_marker_size=15,
min_y_shifted_to_zero = True,
yrange=(-2, 50)
)
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(b) FPD/ICE")
savefig(f"state_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.scatter(X['state'], y, alpha=.05, s=15)
ax.set_xticks(range(1,len(catnames)+1))
ax.set_xticklabels(catnames.values())
ax.set_xlabel("state")
ax.set_ylabel("temperature")
ax.set_title("(a) Marginal")
savefig(f"state_vs_temp")
plt.close()
def meta_weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
# np.random.seed(66)
nyears = 5
years = []
for y in range(1980, 1980 + nyears):
df_ = toy_weather_data()
df_['year'] = y
years.append(df_)
    df_raw = pd.concat(years, axis=0)
|
import os
import yaml
import utils
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
def compile_data(nwis_var_names, source):
# select munged files we want to fetch data for
files_to_process = [f for f in os.listdir(os.path.join('.', '02_munge', 'out')) if f.startswith(f'{source}_')]
# create dataframe per variable
var_dfs = {}
for var in nwis_var_names:
var_dfs[var] = pd.DataFrame()
for f in files_to_process:
print(f'processing {f}')
# get site id
site_id = os.path.splitext(f)[0].split('_')[-1]
# read in data per site and append it to the appropriate dataframe
site_data_csv = os.path.join('.', '02_munge', 'out', f)
site_data_df = pd.read_csv(site_data_csv, index_col='datetime')
# make index into datetime type
site_data_df.index = pd.to_datetime(site_data_df.index)
for var in site_data_df.columns:
var_dfs[var][site_id] = site_data_df[var]
return var_dfs
def add_year_start_end(df, year):
date_ny = f'{year+1}-01-01'
df[date_ny] = np.nan
if f'{year}-01-01' not in df.index:
df[f'{year}-01-01'] = np.nan
return df.sort_index()
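# Illustrative sketch (not part of the original script): the diff-based gap detection
# used in gap_analysis_calc below, applied to a tiny daily series with one missing stretch.
def _demo_gap_detection():
    idx = pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-06'])
    deltas = idx.to_series().diff()[1:]           # 1 day, then 4 days
    gaps = deltas[deltas > dt.timedelta(days=1)]  # keeps only the 4-day jump
    return len(gaps), gaps.max().days             # (1, 4)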
def gap_analysis_calc(source, var_dfs):
# make output directory if it doesn't exist
os.makedirs(os.path.join('data_exploration', 'out', 'gap_analysis_csvs'), exist_ok = True)
# define metric names that we will calculate
metrics = ['p_coverage', 'n_gaps', 'gap_median_days', 'gap_max_days']
gap_template_df = pd.DataFrame(columns=metrics)
metric_dfs = {}
for var, df in var_dfs.items():
print(f'calculating metrics for {var}')
df.dropna(axis=0, how='all', inplace=True)
if df.empty:
continue
metric_dfs[var] = {}
# get list of years available for this variable
# # include all years with any measurements at any of our sites
years = df.index.year.unique()
for site in df.columns:
var_site_gap_df = gap_template_df.copy()
for year in years:
year_df = df[df.index.year==year][site]
# year_df.dropna(inplace=True)
                if year == dt.datetime.today().year:
                    elapsed_days = dt.datetime.today().timetuple().tm_yday
                else:
                    elapsed_days = (dt.datetime(year+1, 1, 1, 0, 0) - dt.datetime(year, 1, 1, 0, 0)).days
var_site_gap_df.loc[year, 'p_coverage'] = year_df.count()/elapsed_days
# drop all nan values so that we can calculate gaps using indices
delta_data = year_df.dropna()
# add back first day of year and first day of subsequent year to capture
# gaps at start/end of year
delta_data = add_year_start_end(delta_data, year)
# calculate the length of all data gaps
deltas = delta_data.index.to_series().diff()[1:]
gaps = deltas[deltas > dt.timedelta(days=1)]
# populate metrics related to gaps
var_site_gap_df.loc[year, 'n_gaps'] = len(gaps)
var_site_gap_df.loc[year, 'gap_median_days'] = gaps.median().days if pd.notna(gaps.median().days) else 0
var_site_gap_df.loc[year, 'gap_max_days'] = gaps.max().days if pd.notna(gaps.max().days) else 0
var_site_gap_df.to_csv(os.path.join('data_exploration', 'out', 'gap_analysis_csvs', f'{source}_{var}_{site}_gap_analysis.csv'))
metric_dfs[var][site]= var_site_gap_df
return metric_dfs, metrics
def plot_gap_analysis(source, metric_dfs, metrics, site_colors):
# make output directory if it doesn't exist
os.makedirs(os.path.join('data_exploration', 'out', 'gap_analysis_plots'), exist_ok = True)
for var, data_by_site in metric_dfs.items():
print(f'plotting metrics for {var}')
        plot_df = pd.DataFrame()
|
from typing import List, Tuple
import pandas as pd
class AgodaCancellationPreprocessor:
SATURDAY = 5
def __init__(self, full_data: pd.DataFrame):
self.number_of_times_customer_canceled = dict()
for id, cancellation in full_data[
["h_customer_id", "cancellation_datetime"]].itertuples(
index=False):
if cancellation == 0:
if id in self.number_of_times_customer_canceled:
self.number_of_times_customer_canceled[id] += 1
else:
self.number_of_times_customer_canceled[id] = 1
self.average_cancellation_days_from_booking = dict()
self.average_cancellation_days_to_checkin = dict()
        dates = pd.DataFrame([])
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 10:52:43 2020
@author: Celina
"""
import pandas as pd
import outdoor.excel_wrapper.Wrapping_Functions as WF
def wrapp_GeneralData(obj, df1):
"""
Description
-----------
Get general Process Data: Lifetime and Group
Context
----------
Function is called in Wrapp_ProcessUnits
Parameters
----------
df1 : Dataframe which holds information of LT and Group
"""
Name = df1.iloc[0,0]
LifeTime = df1.iloc[4,0]
ProcessGroup = df1.iloc[3,0]
if not pd.isnull(df1.iloc[12,0]):
emissions = df1.iloc[12,0]
else:
emissions = 0
if not pd.isnull(df1.iloc[13,0]):
maintenance_factor = df1.iloc[13,0]
else:
maintenance_factor = 0.044875
cost_percentage = None
time_span = None
time_mode = 'No Mode'
if not pd.isnull(df1.iloc[14,0]):
cost_percentage = df1.iloc[14,0]
time_span = df1.iloc[15,0]
if df1.iloc[16,0] == 'Yearly':
time_mode = 'Yearly'
else:
time_mode = 'Hourly'
if not pd.isnull(df1.iloc[17,0]):
full_load_hours = df1.iloc[17,0]
else:
full_load_hours = None
obj.set_generalData(ProcessGroup,
LifeTime,
emissions,
full_load_hours,
maintenance_factor,
cost_percentage,
time_span,
time_mode)
def wrapp_ReacionData(obj, df1, df2 = None):
"""
Description
-----------
Get Reaction Data (Stoichiometric or Yield Function) from Excel sheet
Context
----------
Function is called in Wrapp_ProcessUnits
Parameters
----------
    df1 : Dataframe which either holds Stoichiometric or Yield Coefficients
df2: Dataframe which is either empty or holds conversion factors
"""
if obj.Type == "Yield-Reactor":
dict1 = WF.read_type1(df1,0,1)
obj.set_xiFactors(dict1)
list1 = WF.read_list_new(df1, 2, 0)
obj.set_inertComponents(list1)
else:
dict1 = WF.read_type2(df1,0,1,2)
obj.set_gammaFactors(dict1)
dict2 = WF.read_type2(df2,0,1,2)
obj.set_thetaFactors(dict2)
def wrapp_EnergyData(obj, df, df2, df3):
"""
Description
-----------
    Define specific columns from the spreadsheet to set the energy data.
    Sets Demands, ReferenceFlow Types and Components for El, Heat1 and Heat2,
    but only if there are values in the Excel sheet; if not, these values are left
    as None.
Also: Calls wrapp_Temperatures, which sets Temperature and Tau for Heat
Context
----------
Function is called in Wrapp.ProcessUnits
Parameters
----------
    df : Dataframe which holds information on energy demand and reference flow type
df2 : Dataframe which holds information of reference flow components
df3 : Dataframe which holds information on heat temperatures
"""
# Set Reference Flow Type:
if not pd.isnull(df.iloc[0,1]):
ProcessElectricityDemand = df.iloc[0,1]
ProcessElectricityReferenceFlow = df.iloc[1,1]
ProcessElectricityReferenceComponentList = WF.read_list_new(df2, 1, 2)
else:
ProcessElectricityDemand = 0
ProcessElectricityReferenceFlow = None
ProcessElectricityReferenceComponentList = []
if not pd.isnull(df.iloc[0,2]):
ProcessHeatDemand = df.iloc[0,2]
ProcessHeatReferenceFlow = df.iloc[1,2]
ProcessHeatReferenceComponentList = WF.read_list_new(df2, 2, 2)
else:
ProcessHeatDemand = None
ProcessHeatReferenceFlow = None
ProcessHeatReferenceComponentList = []
    if not pd.isnull(df.iloc[0,3]):
|
#!/Tsan/bin/python
# -*- coding: utf-8 -*-
# Libraries to use
from __future__ import division
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import mysql.connector
# Connection setup for reading from the database
with open('conf.json', 'r') as fd:
conf = json.load(fd)
src_db = mysql.connector.connect(**conf['src_db'])
# Constants
riskFreeRate = 0.02  # risk-free rate
varThreshold = 0.05  # 5% VaR threshold
scaleParameter = 50  # 50 weeks per year
# Table names
index_data_table = 'fund_weekly_index'  # index time-series data
index_name_table = 'index_id_name_mapping'
type_index_table = 'index_stype_code_mapping'  # mapping of fund categories to their indices
# Mapping table of private-fund index categories (only needs to run once)
def get_type_index_table(tableName = type_index_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('stype_code',inplace=True)
return pdResult
# Mapping table of private-fund index names and IDs (only needs to run once)
def get_index_table(tableName = index_name_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('index_id',inplace=True)
return pdResult
# Net-asset-value time series of a private-fund index
def get_index(index, tableName=index_data_table):
try:
# sql_query='select id,name from student where age > %s'
cursor = src_db.cursor()
sql = "select index_id,statistic_date,index_value from %s where index_id = '%s'" % (tableName, index)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
pdResult = pd.DataFrame(result, dtype=float)
pdResult.columns = ['index', 'date', 'net_worth']
pdResult = pdResult.drop_duplicates().set_index('date')
pdResult = pdResult.dropna(axis=0)
pdResult = pdResult.fillna(method='ffill')
return pdResult
# Label a date by quarter
def byseasons(x):
if 1<=x.month<=3:
return str(x.year)+'_'+str(1)
elif 4<= x.month <=6:
return str(x.year)+'_'+str(2)
elif 7<= x.month <=9:
return str(x.year)+'_'+str(3)
else:
return str(x.year)+'_'+str(4)
# Compute the max drawdown and its start/end dates
def cal_max_dd_indicator(networthSeries):
maxdd = pd.DataFrame(index = networthSeries.index, data=None, columns =['max_dd','max_dd_start_date','max_dd_end_date'],dtype = float)
maxdd.iloc[0] = 0
maxdd.is_copy = False
for date in networthSeries.index[1:]:
maxdd.loc[date] = [1 - networthSeries.loc[date] / networthSeries.loc[:date].max(),networthSeries.loc[:date].idxmax(),date]
#maxdd[['max_dd_start_date','max_dd_end_date']].loc[date] = [[networthSeries.loc[:date].idxmax(),date]]
#maxdd['max_dd_start_date'].loc[date] = networthSeries.loc[:date].idxmax()
    return maxdd['max_dd'].max(), maxdd.loc[maxdd['max_dd'].idxmax()]['max_dd_start_date'], maxdd.loc[maxdd['max_dd'].idxmax()]['max_dd_end_date']
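# Illustrative sketch (not part of the original script): the drawdown helper above on a
# tiny weekly NAV series. The peak of 1.2 followed by a trough of 0.9 gives 1 - 0.9/1.2 = 0.25.
def _demo_max_dd():
    nav = pd.Series([1.0, 1.2, 0.9, 1.1],
                    index=pd.date_range('2021-01-01', periods=4, freq='W'))
    return cal_max_dd_indicator(nav)  # (0.25, peak date, trough date)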
# Max drawdown per quarter; input and output are both DataFrames
def cal_maxdd_by_season(df):
seasonList = sorted(list(set(df['season'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['season'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'season'
return maxdd_df
# Max drawdown per year; input and output are both DataFrames
def cal_maxdd_by_year(df):
seasonList = sorted(list(set(df['year'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['year'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'year'
return maxdd_df
# Prepare the raw data DataFrame
def get_count_data(cnx):
cursor = cnx.cursor()
sql = "select fund_id,foundation_date,fund_type_strategy from fund_info"
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(result)
df.columns = ['fund_id', 'found_date', 'strategy']
sql = "select type_id, strategy from index_type_mapping"
cursor.execute(sql)
result = cursor.fetchall()
meg = pd.DataFrame(result)
meg.columns = ['type_id', 'strategy']
    # Data cleaning
    df = df.dropna()
    df = df[df['strategy'] != u'']
    # Merge with the strategy/type mapping table
    df = pd.merge(df, meg)
    # Add a year column
    df['year'] = [str(i.year) for i in df['found_date']]
    # Add a month column
    df['month'] = [str(i.year) + '_' + str(i.month) for i in df['found_date']]
return df.drop('strategy', axis=1)
# Get fund counts grouped by year; returns a DataFrame
def get_ann_fund(df):
    temp = df.groupby(['type_id', 'year'])['fund_id'].count().to_frame()  # convert grouped counts to a DataFrame
temp = pd.pivot_table(temp, values='fund_id', index='year', columns=['type_id'])
    temp['Type_0'] = df.groupby(['year'])['fund_id'].count().to_frame()['fund_id']  # add whole-market totals
    temp.sort_index(axis=0, inplace=True)
temp.sort_index(axis=1, inplace=True)
return temp
# Get fund counts grouped by month; returns a DataFrame
def get_month_fund(df):
    temp = df.groupby(['type_id', 'month'])['fund_id'].count().to_frame()  # convert grouped counts to a DataFrame
temp = pd.pivot_table(temp, values='fund_id', index=['month'], columns=['type_id'])
    temp['Type_0'] = df.groupby(['month'])['fund_id'].count().to_frame()['fund_id']  # add whole-market totals
    temp.sort_index(axis=0, inplace=True)
temp.sort_index(axis=1, inplace=True)
return temp
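# Hypothetical toy illustration (not part of the original script) of the
# groupby + pivot_table pattern used above: a long list of funds becomes a
# year-by-type count table. The column names mirror the ones assumed above.
def _count_table_example():
    toy = pd.DataFrame({
        'fund_id': ['f1', 'f2', 'f3', 'f4'],
        'type_id': ['Type_1', 'Type_1', 'Type_2', 'Type_2'],
        'year': ['2016', '2017', '2016', '2016'],
    })
    counts = toy.groupby(['type_id', 'year'])['fund_id'].count().to_frame()
    return pd.pivot_table(counts, values='fund_id', index='year', columns=['type_id'])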
# Prepare the raw data DataFrame of organizations
def get_org_count(cnx):
cursor = cnx.cursor()
sql = "SELECT org_id, found_date FROM PrivateEquityFund.org_info WHERE org_category LIKE '4%'"
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(result)
df.columns = ['org_id', 'found_date']
    # Data cleaning
df = df.dropna()
    # Add a year column
df['year'] = [str(i.year) for i in df['found_date']]
    # Add a month column
df['month'] = [str(i.year) + '_0' + str(i.month) if i.month < 10 else str(i.year) + '_' + str(i.month) for i in
df['found_date']]
return df
# Get organization counts grouped by year; returns a DataFrame
def get_ann_org(df):
    temp = df.groupby(['year'])['org_id'].count().to_frame()  # convert grouped counts to a DataFrame
    temp.sort_index(axis=0, inplace=True)
return temp
# Get organization counts grouped by month; returns a DataFrame
def get_month_org(df):
    temp = df.groupby(['month'])['org_id'].count().to_frame()  # convert grouped counts to a DataFrame
    temp.sort_index(axis=0, inplace=True)
return temp
if __name__ == '__main__':
    # Calculate quarterly indicators
    maxddbyseason = pd.DataFrame() # quarterly max drawdown
retbyseason =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
"""
Created on Tue Mar 5 10:25:35 2019
@authors: <NAME>, Maxence, <NAME>
"""
import sys
import logging
import time
import pandas as pd
import numpy as np
import rdmmp.scraping as scraping
import rdmmp.db as db
import rdmmp.cleaning as cleaning
import rdmmp.preprocessing as preprocessing
import rdmmp.modeling as modeling
import rdmmp.reporting as reporting
import rdmmp.configvalues as cv
def test():
"""
This is an example of Google style.
Args:
param1: This is the first param.
param2: This is a second param.
Returns:
This is a description of what is returned.
Raises:
KeyError: Raises an exception.
"""
# %% DoScraping
def do_scraping(automatic, db_data):
""" Handle the data scraping on Indeed
If not in automatic mode ask for user inputs to let them choose to run the automatic scraping
or to scrap data for the job and location they enter
In automatic mode, Loop on the jobs and locations and call a scraping function for each tuple
Args:
automatic: boolean, ask for user inputs if false then scrap, otherwise scrap predefined lists of jobs and locations
db_data: dataframe from database
"""
log = logging.getLogger('main')
log.info("")
log.info("****************************************")
log.info("*** do_scraping")
log.info("****************************************")
if not automatic:
# Ask if they want to choose what to scrape
choose = input("Do you want to choose what to scrape (y/n) ? ")
if choose in ("y", "Y"):
# Ask the job and location
job = input("Please enter the title to scrape data for: \n").lower()
location = input("Please enter the location to scrape data for: \n").lower()
# Ask the amount of pages to scrap
while True:
num_pages = input("Please enter the number of pages needed (integer only, negative or 0 for all): \n")
try:
num_pages = int(num_pages)
break
except ValueError:
log.error("Invalid number of pages! Please try again.")
# Scrap inputs
log.info("Scraping %s in %s...", job, location)
scraping.get_data(job, num_pages, location, db_data)
else:
# answer is n or N or something else
automatic = True
if automatic:
# Init job and location lists
jobs = cv.CFG.targets
locations = cv.CFG.locations
# Scrap
thread_list = []
for location in locations:
for job in jobs:
log.info("Scraping %s in %s...", job, location)
thread = scraping.ScrapingThread(job, -1, location, db_data)
thread_list.append(thread)
thread.start()
time.sleep(3)
for thread in thread_list:
thread.join()
# %% GetWorkingData
def get_working_data(database_data):
"""
    Combine data from CSV and mongoDB
    Args:
        database_data: dataframe from the database (mongoDB)
    Returns:
        A pandas dataframe
"""
log = logging.getLogger('main')
log.info("")
log.info("****************************************")
log.info("*** get_working_data")
log.info("****************************************")
# Get a dataframe from csv files created by the scraping
csv_data = scraping.import_data_from_csv(cv.CFG.csv_dir, cv.CFG.targets, cv.CFG.locations)
log.info('%d rows from database', database_data.shape[0])
log.info('%d rows from csv files', csv_data.shape[0])
# Concat the 2 dataframes
dataframe = pd.concat([database_data, csv_data], join='inner')
# drop duplicates except the first(database)
dataframe.drop_duplicates(['Title', 'Company', 'Salary', 'City', 'Posting'], inplace=True)
# reset index
dataframe.reset_index(drop=True, inplace=True)
log.info('%d rows in the merge', dataframe.shape[0])
return dataframe
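# A minimal sketch (not part of rdmmp) of the concat + drop_duplicates pattern
# used above; the column names are the ones assumed by get_working_data.
def _dedup_example():
    db_rows = pd.DataFrame({'Title': ['dev'], 'Company': ['A'], 'Salary': [40000],
                            'City': ['Paris'], 'Posting': ['p1']})
    csv_rows = pd.DataFrame({'Title': ['dev', 'qa'], 'Company': ['A', 'B'],
                             'Salary': [40000, 35000], 'City': ['Paris', 'Lyon'],
                             'Posting': ['p1', 'p2']})
    merged = pd.concat([db_rows, csv_rows], join='inner')
    # keeps the database row, drops the duplicate scraped row
    merged.drop_duplicates(['Title', 'Company', 'Salary', 'City', 'Posting'], inplace=True)
    return merged.reset_index(drop=True)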
# %% Cleaner
def do_cleaning(data):
"""
Clean the data to be used in the model
Args:
data: pandas dataframe to clean
"""
log = logging.getLogger('main')
log.info("")
log.info("****************************************")
log.info("*** do_cleaning")
log.info("****************************************")
return cleaning.clean(data)
# %% Preprocess
def pre_processing(data):
"""
Preprocess the data to be used in the model
Args:
data: pandas dataframe to preprocessed
"""
log = logging.getLogger('main')
log.info("")
log.info("****************************************")
log.info("*** pre_process")
log.info("****************************************")
#return preprocessing.preprocess(data)
return preprocessing.prepro(data)
# %% DoModel
def make_model(x_train, x_test, y_train, y_test, x_pred, dnan):
"""
Fit the model on the data and predict salary when it's unknown
Args:
        x_train, x_test, y_train, y_test: train/test splits used to fit and evaluate the model
        x_pred: feature rows whose salary is unknown and must be predicted
        dnan: additional dataframe passed through to modeling.modelize
"""
log = logging.getLogger('main')
log.info("")
log.info("****************************************")
log.info("*** make_model")
log.info("****************************************")
return modeling.modelize(x_train, x_test, y_train, y_test, x_pred, dnan)
# %% UpdateDB
def update_db(dataframe, data_krbf, data_forest):
"""
Save the data in the DB
Args:
        dataframe: pandas dataframe to be saved in mongoDB
        data_krbf, data_forest: model outputs stored alongside the scraped data
"""
log = logging.getLogger('main')
log.info("")
log.info("****************************************")
log.info("*** update_db")
log.info("****************************************")
krbd_df =
|
pd.DataFrame(index=dataframe.index)
|
pandas.DataFrame
|
import datetime
import pandas as pd
import numpy as np
todays_date = datetime.datetime.now().date()
index = pd.date_range(todays_date-datetime.timedelta(10), periods=10, freq='D')
columns = ['A','B', 'C']
print(columns)
df_ =
|
pd.DataFrame(index=index, columns=columns)
|
pandas.DataFrame
|
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from datetime import timedelta as td
from itertools import product
import numpy as np
import pandas as pd
from floris.utilities import wrap_360
from . import utilities as fsut
def df_movingaverage(
df_in,
cols_angular,
window_width=td(seconds=60),
min_periods=1,
center=True,
calc_median_min_max_std=False,
return_index_mapping=False,
):
# Copy and ensure dataframe is indexed by time
df = df_in.copy()
if "time" in df.columns:
df = df.set_index("time")
# Find non-angular columns
if isinstance(cols_angular, bool):
if cols_angular:
cols_angular = [c for c in df.columns]
else:
cols_angular = []
cols_regular = [c for c in df.columns if c not in cols_angular]
# Now calculate cos and sin components for angular columns
sin_cols = ["{:s}_sin".format(c) for c in cols_angular]
cos_cols = ["{:s}_cos".format(c) for c in cols_angular]
df[sin_cols] = np.sin(df[cols_angular] * np.pi / 180.0)
df[cos_cols] = np.cos(df[cols_angular] * np.pi / 180.0)
# Drop angular columns
df = df.drop(columns=cols_angular)
# Now calculate rolling (moving) average
df_roll = df.rolling(
window_width,
center=center,
axis=0,
min_periods=min_periods
)
# First calculate mean values of non-angular columns
df_ma = df_roll[cols_regular].mean().copy()
# Now add mean values of angular columns
df_ma[cols_angular] = wrap_360(
np.arctan2(
df_roll[sin_cols].mean().values,
df_roll[cos_cols].mean().values
) * 180.0 / np.pi
)
# Figure out which indices/data points belong to each window
if (return_index_mapping or calc_median_min_max_std):
df_tmp = df_ma[[]].copy().reset_index(drop=False)
df_tmp["tmp"] = 1
df_tmp = df_tmp.rolling(window_width, center=center, axis=0, on="time")["tmp"]
# Grab index of first and last time entry for each window
windows_min = list(df_tmp.apply(lambda x: x.index[0]).astype(int))
windows_max = list(df_tmp.apply(lambda x: x.index[-1]).astype(int))
# Now create a large array that contains the array of indices, with
# the values in each row corresponding to the indices upon which that
# row's moving/rolling average is based. Note that we purposely create
# a larger matrix than necessary, since some rows/windows rely on more
# data (indices) than others. This is the case e.g., at the start of
# the dataset, at the end, and when there are gaps in the data. We fill
# the remaining matrix entries with "-1".
dn = int(np.ceil(window_width/fsut.estimate_dt(df_in["time"]))) + 5
data_indices = -1 * np.ones((df_ma.shape[0], dn), dtype=int)
for ii in range(len(windows_min)):
lb = windows_min[ii]
ub = windows_max[ii]
ind = np.arange(lb, ub + 1, dtype=int)
data_indices[ii, ind - lb] = ind
# Calculate median, min, max, std if necessary
if calc_median_min_max_std:
# Append all current columns with "_mean"
df_ma.columns = ["{:s}_mean".format(c) for c in df_ma.columns]
# Add statistics for regular columns
funs = ["median", "min", "max", "std"]
cols_reg_stats = ["_".join(i) for i in product(cols_regular, funs)]
df_ma[cols_reg_stats] = df_roll[cols_regular].agg(funs).copy()
# Add statistics for angular columns
# Firstly, create matrix with indices for the mean values
data_indices_mean = np.tile(np.arange(0, df_ma.shape[0]), (dn, 1)).T
# Grab raw and mean data and format as numpy arrays
D = df_in[cols_angular].values
M = df_ma[["{:s}_mean".format(c) for c in cols_angular]].values
# Add NaN row as last row. This corresponds to the -1 indices
# that we use as placeholders. This way, those indices do not
# count towards the final statistics (median, min, max, std).
D = np.vstack([D, np.nan * np.ones(D.shape[1])])
M = np.vstack([M, np.nan * np.ones(M.shape[1])])
# Now create a 3D matrix containing all values. The three dimensions
# come from:
# > [0] one dimension containing the rolling windows,
# > [1] one with the raw data underlying each rolling window,
# > [2] one for each angular column within the dataset
values = D[data_indices, :]
values_mean = M[data_indices_mean, :]
# Center values around values_mean
values[values > (values_mean + 180.0)] += -360.0
values[values < (values_mean - 180.0)] += 360.0
# Calculate statistical properties and wrap to [0, 360)
values_median = wrap_360(np.nanmedian(values, axis=1))
values_min = wrap_360(np.nanmin(values, axis=1))
values_max = wrap_360(np.nanmax(values, axis=1))
values_std = wrap_360(np.nanstd(values, axis=1))
# Save to dataframe
df_ma[["{:s}_median".format(c) for c in cols_angular]] = values_median
df_ma[["{:s}_min".format(c) for c in cols_angular]] = values_min
df_ma[["{:s}_max".format(c) for c in cols_angular]] = values_max
df_ma[["{:s}_std".format(c) for c in cols_angular]] = values_std
if return_index_mapping:
return df_ma, data_indices
return df_ma
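# Hypothetical helper (not part of floris) illustrating the sin/cos trick used
# above for angular columns: a plain mean of 350 and 10 degrees gives 180,
# while the circular mean correctly gives ~0.
def _circular_mean_deg(angles_deg):
    rad = np.radians(np.asarray(angles_deg, dtype=float))
    return wrap_360(np.degrees(np.arctan2(np.sin(rad).mean(), np.cos(rad).mean())))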
def df_downsample(
df_in,
cols_angular,
window_width=td(seconds=60),
min_periods=1,
center=False,
calc_median_min_max_std=False,
return_index_mapping=False,
):
# Copy and ensure dataframe is indexed by time
df = df_in.copy()
if "time" in df.columns:
df = df.set_index("time")
# Find non-angular columns
cols_regular = [c for c in df.columns if c not in cols_angular]
# Now calculate cos and sin components for angular columns
sin_cols = ["{:s}_sin".format(c) for c in cols_angular]
cos_cols = ["{:s}_cos".format(c) for c in cols_angular]
df[sin_cols] = np.sin(df[cols_angular] * np.pi / 180.0)
df[cos_cols] = np.cos(df[cols_angular] * np.pi / 180.0)
# Drop angular columns
df = df.drop(columns=cols_angular)
# Add _N for each variable to keep track of n.o. data points
cols_all = df.columns
cols_N = ["{:s}_N".format(c) for c in cols_all]
df[cols_N] = 1 - df[cols_all].isna().astype(int)
# Now calculate downsampled dataframe, automatically
# mark by label on the right (i.e., "past 10 minutes").
df_resample = df.resample(window_width, label="right", axis=0)
# First calculate mean values of non-angular columns
df_out = df_resample[cols_regular].mean().copy()
# Now add mean values of angular columns
df_out[cols_angular] = wrap_360(
np.arctan2(
df_resample[sin_cols].mean().values,
df_resample[cos_cols].mean().values
) * 180.0 / np.pi
)
# Check if we have enough samples for every measurement
if min_periods > 1:
N_counts = df_resample[cols_N].sum()
df_out[N_counts < min_periods] = None # Remove data relying on too few samples
# Figure out which indices/data points belong to each window
if (return_index_mapping or calc_median_min_max_std):
df_tmp = df[[]].copy().reset_index()
df_tmp["tmp"] = 1
df_tmp = df_tmp.resample(window_width, on="time", label="right", axis=0)["tmp"]
# Grab index of first and last time entry for each window
def get_first_index(x):
if len(x) <= 0:
return -1
else:
return x.index[0]
def get_last_index(x):
if len(x) <= 0:
return -1
else:
return x.index[-1]
windows_min = list(df_tmp.apply(get_first_index).astype(int))
windows_max = list(df_tmp.apply(get_last_index).astype(int))
# Now create a large array that contains the array of indices, with
# the values in each row corresponding to the indices upon which that
# row's moving/rolling average is based. Note that we purposely create
# a larger matrix than necessary, since some rows/windows rely on more
# data (indices) than others. This is the case e.g., at the start of
# the dataset, at the end, and when there are gaps in the data. We fill
# the remaining matrix entries with "-1".
dn = int(np.ceil(window_width/fsut.estimate_dt(df_in["time"]))) + 5
data_indices = -1 * np.ones((df_out.shape[0], dn), dtype=int)
for ii in range(len(windows_min)):
lb = windows_min[ii]
ub = windows_max[ii]
if not ((lb == -1) | (ub == -1)):
ind = np.arange(lb, ub + 1, dtype=int)
data_indices[ii, ind - lb] = ind
# Calculate median, min, max, std if necessary
if calc_median_min_max_std:
# Append all current columns with "_mean"
df_out.columns = ["{:s}_mean".format(c) for c in df_out.columns]
# Add statistics for regular columns
funs = ["median", "min", "max", "std"]
cols_reg_stats = ["_".join(i) for i in product(cols_regular, funs)]
df_out[cols_reg_stats] = df_resample[cols_regular].agg(funs).copy()
# Add statistics for angular columns
# Firstly, create matrix with indices for the mean values
data_indices_mean = np.tile(np.arange(0, df_out.shape[0]), (dn, 1)).T
# Grab raw and mean data and format as numpy arrays
D = df_in[cols_angular].values
M = df_out[["{:s}_mean".format(c) for c in cols_angular]].values
# Add NaN row as last row. This corresponds to the -1 indices
# that we use as placeholders. This way, those indices do not
# count towards the final statistics (median, min, max, std).
D = np.vstack([D, np.nan * np.ones(D.shape[1])])
M = np.vstack([M, np.nan * np.ones(M.shape[1])])
# Now create a 3D matrix containing all values. The three dimensions
# come from:
# > [0] one dimension containing the rolling windows,
# > [1] one with the raw data underlying each rolling window,
# > [2] one for each angular column within the dataset
values = D[data_indices, :]
values_mean = M[data_indices_mean, :]
# Center values around values_mean
values[values > (values_mean + 180.0)] += -360.0
values[values < (values_mean - 180.0)] += 360.0
# Calculate statistical properties and wrap to [0, 360)
values_median = wrap_360(np.nanmedian(values, axis=1))
values_min = wrap_360(np.nanmin(values, axis=1))
values_max = wrap_360(np.nanmax(values, axis=1))
values_std = wrap_360(np.nanstd(values, axis=1))
# Save to dataframe
df_out[["{:s}_median".format(c) for c in cols_angular]] = values_median
df_out[["{:s}_min".format(c) for c in cols_angular]] = values_min
df_out[["{:s}_max".format(c) for c in cols_angular]] = values_max
df_out[["{:s}_std".format(c) for c in cols_angular]] = values_std
if center:
# Shift time column towards center of the bin
df_out.index = df_out.index - window_width / 2.0
if return_index_mapping:
return df_out, data_indices
return df_out
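# Hypothetical mini-example (not part of floris): resample(..., label="right")
# stamps each bin with its end time, and shifting the index back by half the
# window width recenters the bins, which is what the center option above does.
def _resample_label_example():
    t = pd.date_range("2020-01-01", periods=6, freq="10S")
    s = pd.Series(np.arange(6.0), index=t)
    binned = s.resample(td(seconds=30), label="right").mean()
    centered = binned.copy()
    centered.index = centered.index - td(seconds=30) / 2.0
    return binned, centered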
def df_resample_by_interpolation(
df,
time_array,
circular_cols,
interp_method='linear',
max_gap=None,
verbose=True
):
# Copy with properties but no actual data
df_res = df.head(0).copy()
# Remove timezones, if any
df = df.copy()
time_array = [
|
pd.to_datetime(t)
|
pandas.to_datetime
|
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
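# Hypothetical usage sketch (not part of pandas): round-trip a small frame
# through pickle and check that it comes back unchanged.
def _round_trip_pickle_example():
    df = pd.DataFrame({'a': [1, 2, 3]})
    assert_frame_equal(df, round_trip_pickle(df))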
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
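# Hypothetical usage sketch (not part of pandas): read a gzip-compressed CSV
# through the decompress_file context manager; the path is an assumption.
def _decompress_file_example(path):
    with decompress_file(path, compression='gzip') as f:
        return pd.read_csv(f)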
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
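# Hypothetical illustration (not part of pandas): assert_almost_equal
# dispatches on the input type, so plain scalars and Series both go through
# the same entry point.
def _assert_almost_equal_example():
    assert_almost_equal(1.0000001, 1.0000002)
    assert_almost_equal(pd.Series([0.123456]), pd.Series([0.123455]),
                        check_less_precise=True)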
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
            locale.Error):  # horrible name for an Exception subclass
return False
else:
return True
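# Hypothetical usage sketch (not part of pandas): guard with can_set_locale
# before temporarily switching locales; the locale string is an assumption
# and may not exist on every system.
def _set_locale_example():
    if can_set_locale('en_US.UTF-8'):
        with set_locale('en_US.UTF-8'):
            return locale.getlocale()
    return None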
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_index_equal(left.right, right.right, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
return assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
* left.index.names == right.index.names
* left.columns.names == right.columns.names
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - the same labels must be paired with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas.util.testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
def assert_panel_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
    check_names : bool, default False
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_frame_equal(litem, ritem,
check_less_precise=check_less_precise,
check_names=check_names)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
else:
raise NotImplementedError(type(left))
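# Illustrative usage (hypothetical values): the dispatch above means one call covers
# several container types.
#   assert_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))
#   assert_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0]))
#   assert_equal(np.array([1, 2]), np.array([1, 2]))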
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
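# Example (illustrative): box_expected([1, 2, 3], pd.Series) returns pd.Series([1, 2, 3]);
# box_expected([1, 2, 3], pd.DataFrame) returns a single-row (1x3) DataFrame because of the
# transpose above; box_expected([1, 2, 3], np.ndarray) returns np.array([1, 2, 3]).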
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == 'block':
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left_index, right_index)
    else:
        # indices match; nothing further to check
        pass
if check_fill_value:
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.values, right.values,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(np.asarray(left.values),
np.asarray(right.values))
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
if check_fill_value:
assert_attr_equal('default_fill_value', left, right, obj=obj)
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(
series, right[col],
check_dtype=check_dtype,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices
)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):
return pd.timedelta_range(start='1 day', periods=k, freq=freq,
name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product(
(('foo', 'bar'), (1, 2)), names=names, **kwargs)
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeRangeIndex,
makeIntervalIndex, makeCategoricalIndex,
makeMultiIndex
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", "\\nPanel", FutureWarning)
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makeTimeDataFrame(nper) for c in cols}
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", "\\nPanel", FutureWarning)
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makePeriodFrame(nper) for c in cols}
return Panel.fromDict(data)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
    assert (names is None or names is False or
            names is True or len(names) == nlevels)
assert idx_type is None or (idx_type in ('i', 'f', 's', 'u',
'dt', 'p', 'td')
and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"{idx_type}" is not a legal value for `idx_type`, '
'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'
.format(idx_type=idx_type))
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
        for j in range(div_factor):  # api: pandas.compat.range
#!/usr/bin/env python3
"""exfi.new_correct.py: fill small overlaps and gaps with abyss-sealer"""
import logging
from tempfile import \
mkstemp
from subprocess import Popen
import os
import pandas as pd
from exfi.io.bed import \
BED3_COLS, \
bed3_to_bed4, \
bed4_to_node2sequence, \
bed4_to_edge2overlap
def prepare_sealer(bed4, transcriptome_dict, args):
"""exfi.new_correct.prepare_sealer: inspect the bed4 file and create a fasta
file where pairs of exons have a small gap between them or have a small
overlap.
"""
sealer_input = mkstemp()
max_fp_bases = args["max_fp_bases"]
max_gap_size = args["max_gap_size"]
node2sequence = bed4_to_node2sequence(bed4, transcriptome_dict)
edge2overlap = bed4_to_edge2overlap(bed4)
node2sequence_dict = node2sequence.set_index("name").to_dict()["sequence"]
# Disable warnings
pd.options.mode.chained_assignment = None
# Compute the small gaps
small_gaps = edge2overlap\
.loc[(edge2overlap.overlap < 0) & (edge2overlap.overlap <= max_gap_size)]
small_gaps["identifier"] = small_gaps['u'] + "~" + small_gaps['v']
small_gaps["data_to_map"] = tuple(zip(small_gaps.u, small_gaps.v))
small_gaps["sequence"] = small_gaps.data_to_map\
.map(
lambda x: \
node2sequence_dict[x[0]][0:-max_fp_bases] + \
100 * 'N' + \
node2sequence_dict[x[1]][max_fp_bases:]
)
small_gaps = small_gaps[["identifier", "sequence"]]
# Compute pairs of overlapping exons
overlaps = edge2overlap.loc[edge2overlap.overlap >= 0]
overlaps["data_to_map"] = tuple(zip(overlaps.u, overlaps.v, overlaps.overlap))
overlaps["identifier"] = overlaps.u + "~" + overlaps.v
overlaps["sequence"] = overlaps.data_to_map\
.map(
lambda x: \
node2sequence_dict[x[0]][0:-x[2] - max_fp_bases] + \
100 * 'N' + \
node2sequence_dict[x[1]][x[2] + max_fp_bases:]
)
overlaps = overlaps[["identifier", "sequence"]]
    # Restore the chained-assignment warning
pd.options.mode.chained_assignment = 'warn'
# Merge the results
    for_sealer = pd.concat([small_gaps, overlaps])  # api: pandas.concat
# ---
# jupyter:
# jupytext:
# formats: notebooks//ipynb,rmd//Rmd,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Catchment Distance
import pandas as pd
import matplotlib.pyplot as plt
import osmnx as ox
import networkx as nx
# +
churches = ['mossleyhill','sthelens','gateway','stoneycroftsalvationarmy']
data = {}
for church in churches:
data[church] = pd.read_csv(f'../data/sensitive/derived/{church}.csv')
# -
pcdlatlng = pd.read_csv('../data/csv/pcdlatlng.csv.gz')
location = {'mossleyhill' : 'L18 8DB',
'sthelens' : 'WA10 2DT',
'gateway' : 'L24 9HJ',
'stoneycroftsalvationarmy': 'L13 3BT'}
def get_coords(postcode):
"Lookup coordinates from conversion table."
row = pcdlatlng.loc[pcdlatlng.postcode==postcode].iloc[0]
return row.latitude, row.longitude
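# Illustrative check (assumes the postcode exists in pcdlatlng):
# get_coords('L18 8DB')  # -> (latitude, longitude) for the Mossley Hill site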
distance = {}
shortest = {}
all_routes = {}
mile = 1609 # metres
BigMap = ox.graph_from_point(get_coords(location['mossleyhill']), distance=20*mile, network_type='drive')
# +
def get_distances(church):
church_coords = get_coords(location[church])
dest_node = ox.get_nearest_node(BigMap, church_coords)
shortest[church] = {}
distance[church] = {}
all_routes[church] = []
exceptions = []
postcodes = list(data[church].Postcode)
    for postcode in list(postcodes):  # iterate over a copy; the list may shrink below
        try:
            orig_node = ox.get_nearest_node(BigMap, get_coords(postcode))
            shortest[church][postcode] = nx.shortest_path(BigMap, orig_node, dest_node, weight='length')
            distance[church][postcode] = nx.shortest_path_length(BigMap, orig_node, dest_node, weight='length') / mile
            all_routes[church].append(shortest[church][postcode])
        except Exception:
            exceptions.append(postcode)
            postcodes.remove(postcode)
# print(f'Could not find {exceptions} -- are these valid postcodes?')
return distance[church]
# -
for church in churches:
print(church)
distance[church] = get_distances(church)
dist_df = {}
for church in churches:
    dist_df[church] = pd.DataFrame(distance[church], index=['Distance'])  # api: pandas.DataFrame
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, precision_score
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
class TuneSVM(object):
def __init__(self, train_data, kernel, cv_num, stopwords, title):
self.data = train_data
self.kernel = kernel
self.stopwords = stopwords
self.title = title
self.k_folds = KFold(n_splits=cv_num, shuffle=True)
self.cv_scores = pd.DataFrame()
def tune_parameters(self, params, vector):
ngram_range = params['ngram_range']
max_df = params['max_df']
min_df = params['min_df']
C = params['C']
for n in ngram_range:
for mx in max_df:
for mn in min_df:
for c in C:
self.run_cv(n, mx, mn, c, vector)
return None
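    # Illustrative usage (hypothetical parameter grid and title):
    #   tuner = TuneSVM(train_df, kernel='linear', cv_num=5, stopwords='english', title='svm_linear')
    #   tuner.tune_parameters({'ngram_range': [(1, 1), (1, 2)],
    #                          'max_df': [0.9], 'min_df': [2], 'C': [0.1, 1.0]},
    #                         vector='tfidf')
    #   tuner.save_scores_csv('svm_linear')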
def save_scores_csv(self, title):
self.cv_scores.to_csv('../../../results/tuning/%s_tuning.csv' % title)
return None
def run_cv(self, ngram_range, max_df, min_df, C, vector):
fold = 0
for train_index, val_index in self.k_folds.split(self.data):
fold += 1
print(fold)
X_train = self.data.iloc[train_index]['text'].values
y_train = self.data.iloc[train_index]['label'].values
X_val = self.data.iloc[val_index]['text'].values
y_val = self.data.iloc[val_index]['label'].values
if vector == 'count':
vectorizer = CountVectorizer(ngram_range=ngram_range,
max_df=max_df,
min_df=min_df,
stop_words=self.stopwords)
else:
vectorizer = TfidfVectorizer(ngram_range=ngram_range,
max_df=max_df,
min_df=min_df,
stop_words=self.stopwords)
try:
X_train_vec = vectorizer.fit_transform(X_train)
X_val_vec = vectorizer.transform(X_val)
except:
return None
else:
clf = SVC(C=C, kernel=self.kernel, probability=True, gamma='scale')
clf.fit(X_train_vec, y_train)
y_train_pred = clf.predict(X_train_vec)
y_train_prob = clf.predict_proba(X_train_vec)
y_train_prob = y_train_prob[:, 1]
train_scores = self.evaluate_cv_results(y_train, y_train_pred, y_train_prob,
ngram_range, max_df, min_df, C)
y_val_pred = clf.predict(X_val_vec)
y_val_prob = clf.predict_proba(X_val_vec)
y_val_prob = y_val_prob[:, 1]
val_scores = self.evaluate_cv_results(y_val, y_val_pred, y_val_prob,
ngram_range, max_df, min_df, C)
eval_df = self.create_scores_dataframe(train_scores, val_scores, fold, vector)
self.cv_scores = pd.concat([self.cv_scores, eval_df])
self.save_scores_csv('temp_%s' % self.title)
return None
def evaluate_cv_results(self, y_true, y_pred, y_prob, ngram_range, max_df, min_df, C):
scores = {'ngram_range': [], 'max_df': [], 'min_df': [], 'C': [],
'Acc': [], 'recall': [], 'PPV': [], 'AUC': []}
scores['ngram_range'].append(ngram_range)
scores['max_df'].append(max_df)
scores['min_df'].append(min_df)
scores['C'].append(C)
scores['Acc'].append(accuracy_score(y_true, y_pred))
scores['recall'].append(recall_score(y_true, y_pred))
scores['PPV'].append(precision_score(y_true, y_pred))
scores['AUC'].append(roc_auc_score(y_true, y_prob))
return scores
def create_scores_dataframe(self, train_dict, val_dict, fold, vector):
train_df = pd.DataFrame(train_dict)
train_df['dataset'] = 'train'
train_df['fold'] = fold
train_df['vector'] = vector
val_df = pd.DataFrame(val_dict)
val_df['dataset'] = 'val'
val_df['fold'] = fold
val_df['vector'] = vector
        eval_df = pd.concat([train_df, val_df])  # api: pandas.concat
from __future__ import print_function
from os.path import dirname, abspath
import pandas as pd
import numpy as np
import glob
import pysam
from Bio import SeqIO
from optaux import resources
resource_dir = resources.__path__[0]
here = dirname(abspath(__file__))
strain_to_characteristic = \
{'hisD': ['2,400,257', '3,178,254'], # lrhA/alaA, yqiC
'gltB': ['3,178,277', '4,546,084'], # yqiC, yjiC
'gltA': ['2,719,426'], # kgtP/rrfG
'pyrC': ['444,779'] # cyoB
}
pair_to_kos = {'hisD_pyrC': [['hisD'], ['pyrC']],
'hisD_gltA': [['hisD'], ['gltA', 'prpC']],
'hisD_gltB': [['hisD'], ['gltB', 'gdhA']]}
gene_to_reaction_ko_abbrev = {'hisD': 'HISTD',
'gltA': 'CS',
'pyrC': 'DHORTS',
'gltB': 'GLUDy'}
def _fill_missing_df_entries(df, pair):
if pair != 'hisDpyrC':
return df
# lrhA, alaA (hisD characteristic mutation)
df.loc['2,400,257', 'AUX hisD pyrC A3 F26 I0 R1'] = .144 # likely same mutation
# cyoB (pyrC)
df.loc['444,779', 'AUX hisD pyrC A3 F17 I0 R1'] = .905
df.loc['444,779', 'AUX hisD pyrC A3 F26 I0 R2'] = .961
df.loc['444,779', 'AUX hisD pyrC A4 F11 I0 R1'] = .836
df.loc['444,779', 'AUX hisD pyrC A4 F11 I0 R2'] = .857
df.loc['444,779', 'AUX hisD pyrC A4 F17 I0 R1'] = .892
# yqiC (hisD characteristic mutation)
df.loc['3,178,254', 'AUX hisD pyrC A2 F10 I0 R1'] = .292
df.loc['3,178,254', 'AUX hisD pyrC A3 F26 I0 R1'] = .132
df.loc['3,178,254', 'AUX hisD pyrC A3 F26 I0 R2'] = 0. # not found
df.loc['3,178,254', 'AUX hisD pyrC A4 F11 I0 R1'] = 0. # not found
df.loc['3,178,254', 'AUX hisD pyrC A4 F11 I0 R2'] = .167
df.loc['3,178,254', 'AUX hisD pyrC A4 F17 I0 R1'] = .111
df.loc['3,178,254', 'AUX hisD pyrC A4 F24 I0 R1'] = 0. # not found
df.loc['3,178,254', 'AUX hisD pyrC A4 F24 I0 R2'] = .088
# envZ (shouldn't be 100%, breseq rounds values to 100 in some cases)
df.loc['3,529,197', 'AUX hisD pyrC A4 F11 I0 R2'] = .719
df.loc['3,529,197', 'AUX hisD pyrC A4 F17 I0 R1'] = .915
df.loc['3,529,197', 'AUX hisD pyrC A4 F24 I0 R2'] = .913
return df
def _add_ale_numbers_to_df(df):
df = df.T
for i, exp in enumerate(df.T):
df.loc[exp, 'Ale'] = int(exp.split(' ')[3].replace('A', ''))
df.loc[exp, 'Flask'] = int(exp.split(' ')[4].replace('F', ''))
df.loc[exp, 'Isolate'] = int(exp.split(' ')[5].replace('I', ''))
df.set_index(['Ale', 'Flask', 'Isolate'], inplace=True)
df = df.T.sort_index(axis=1)
return df
def _append_rows_to_abundance_df(out_df, strain1_name, strain2_name,
strain1_abundance, strain2_abundance):
# get reaction name from gene KO
strain_1_ko = gene_to_reaction_ko_abbrev[strain1_name]
strain_2_ko = gene_to_reaction_ko_abbrev[strain2_name]
# normalize abundance to 100%
strain_1 = strain1_abundance / (strain1_abundance + strain2_abundance)
strain_2 = strain2_abundance / (strain1_abundance + strain2_abundance)
out_df = \
out_df.append(pd.Series(strain_1, name='__'.join([strain_1_ko,
strain_2_ko])))
out_df = \
out_df.append(pd.Series(strain_2, name='__'.join([strain_2_ko,
strain_1_ko])))
return out_df
def _get_abundance_statistics(df, strain_2_name, stats_df):
df_new = df.T.reset_index()
for ale in set(df_new['Ale']):
filtered_df = df_new[df_new['Ale'] == ale]
analysis_series = filtered_df['hisD'] / (filtered_df['hisD'] +
filtered_df[strain_2_name])
stats_df.loc[ale, 'mean'] = analysis_series.mean()
stats_df.loc[ale, 'Stdev'] = analysis_series.std()
print(ale, '%.2f %.2f' % (analysis_series.mean(),
analysis_series.std()))
def abundances_to_df(pair, abundance_1, abundance_2):
df = pd.DataFrame.from_dict({pair[:4]: abundance_1,
pair[4:]: abundance_2})
return df
def get_characteristic_abundance_df(save_loc):
out_df = pd.DataFrame()
stats_df = pd.DataFrame()
for pair in ['hisDgltA', 'hisDgltB', 'hisDpyrC']:
strain_1_name = pair[:4]
strain_2_name = pair[4:]
def _drop_column(name):
if 'AUX' not in name:
return True
if 'A0' in name or 'A1 ' in name or 'A7' in name:
return True
if 'I40' in name or 'I30' in name:
return True
# Low coverage and no detected gltA characteristic mutation
# for ALE '10-23-1-1'. Remove this point
if 'A10' in name and 'F23' in name:
return True
return False
df_raw = \
pd.read_csv('%s/resequencing_data/AUX %s %s Mutation Table.csv' %
(resource_dir, strain_1_name, strain_2_name))
df_raw.set_index('Position', inplace=True)
df = df_raw[[i for i in df_raw.columns if not _drop_column(i)]]
df = _fill_missing_df_entries(df, pair)
def _get_average_mutation_fraction(strain_name):
"""Some characteristic mutations are more difficult for breseq
to pick up so they appear as being present at 0%. In these cases
defer to the value of the other characteristic mutation"""
strain_fraction = np.zeros(len(df.columns))
num_nonzero_array = np.zeros(len(df.columns))
for characteristic in strain_to_characteristic[strain_name]:
strain_fraction += df.loc[characteristic].values
num_nonzero_array += (1. * df.loc[characteristic].values > 0)
return strain_fraction / num_nonzero_array
strain_1_abundances = _get_average_mutation_fraction(strain_1_name)
strain_2_abundances = _get_average_mutation_fraction(strain_2_name)
abundance_df = abundances_to_df(pair, strain_1_abundances,
strain_2_abundances).T
abundance_df.columns = df.columns
abundance_df = _add_ale_numbers_to_df(abundance_df)
abundance_df.to_csv('%s/abundance_by_characteristic_%s.csv' %
(save_loc, pair))
_get_abundance_statistics(abundance_df, strain_2_name, stats_df)
out_df = \
_append_rows_to_abundance_df(out_df, strain_1_name, strain_2_name,
strain_1_abundances,
strain_2_abundances)
print(out_df)
out_df.to_excel('%s/characteristic_abundances_strain1.xlsx' % save_loc)
stats_df.to_csv('%s/characteristic_hisD_abundance_stats.csv' % save_loc)
def get_coverage_abundance_df(save_loc, alignment_loc):
    out_df = pd.DataFrame()  # api: pandas.DataFrame
#!/usr/bin/env python
# coding: utf-8
# # Anomaly Detection Techniques in Python
# ## Importing Libraries
# In[1]:
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os
from IPython.display import HTML
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import DBSCAN
from matplotlib import cm
from sklearn.ensemble import IsolationForest
import seaborn as sns
from sklearn.neighbors import LocalOutlierFactor
from lazypredict.Supervised import LazyClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
import plotly as plty
import plotly.graph_objs as go
# ## Loading Dataset
# In[2]:
def fileRead(directory_path):
file_name = list()
for root, dirs, files in os.walk(directory_path):
for filename in files:
file_name.append(filename)
return file_name
directoryPath1 = "/home/mishraanurag218/Anurag/Projects/Untitled Folder/data/s1/"
directoryPath2 = "/home/mishraanurag218/Anurag/Projects/Untitled Folder/data/s2/"
s1 = fileRead(directoryPath1)
#print(s1)
s2 = fileRead(directoryPath2)
#print(s2)
# In[3]:
cols = ['time','acc_frontal','acc_vertical','acc_lateral','id','rssi','phase','frequency','activity']
def folder_to_csv(directory_path,file_name,col_name):
df_temp = pd.DataFrame()
for f_n in file_name:
df = pd.read_csv(directory_path+f_n,names=col_name)
df['device_id'] = f_n[0:-1]
df['sex'] = f_n[-1]
df_temp = pd.concat([df_temp, df], ignore_index=True)
return df_temp
df_s1 = folder_to_csv(directoryPath1,s1,cols)
df_s2 = folder_to_csv(directoryPath2,s2,cols)
df = pd.concat([df_s1, df_s2], ignore_index=True)
df.head(5)
# ### Data Pre-processing
# ### Changing the sex into binary
# In[4]:
def categorical_to_binary(x):
if x=='F':
return 0
else:
return 1
df['sex_b'] = df['sex'].apply(categorical_to_binary)
df.head(5)
# In[5]:
df
# ### Removing non-numerical attributes, categorical values and ids, since machine learning algorithms only work on numerical values
# In[6]:
dfX = df.copy().drop(['sex','device_id','id','sex_b','time'],axis=1)
dfY = df['sex_b'].copy()
dfX.head()
# ### Scaling the features to a standard range
# In[7]:
scaler = MinMaxScaler()
data = scaler.fit_transform(dfX)
dfX = pd.DataFrame(data, columns = dfX.columns)
dfX.head(5)
# ## Univariate Analysis on dataset
# #### Function for Histogram plot
# In[8]:
def hist_plot(x,y):
for i in y:
sns.distplot(x[i],bins=150)
plt.show()
# In[9]:
cols = ['time','acc_frontal','acc_vertical','acc_lateral','rssi','phase']
hist_plot(df,cols)
# #### Function for joint plot
# In[10]:
def joint_plot(x,y,z):
for i in y:
sns.jointplot(x=i, y=z, data=x);
plt.show()
# In[11]:
cols = ['acc_frontal','acc_vertical','acc_lateral','rssi','phase']
joint_plot(df,cols,'time')
# #### Pair Plot
# In[12]:
sns.set_style("whitegrid");
sns.pairplot(df, hue="sex_b", height=3);
plt.show()
# #### implot
# In[13]:
def implot(x,y,z):
for i in y:
for j in y:
if i!=j:
sns.lmplot(x = i, y = j, data = x, hue = z, col = z)
plt.show()
implot(df,['acc_frontal','acc_vertical','acc_lateral','rssi','phase'],'sex_b')
# In[14]:
implot(df,['acc_frontal','acc_vertical','acc_lateral','rssi','phase'],'activity')
# #### countplot
# In[15]:
def countPlot(x,y):
for i in y:
sns.countplot(x =i, data = x)
plt.show()
sns.countplot(x =y[0], hue = y[1], data = x)
plt.show()
countPlot(df,['sex_b','activity'])
# #### Balancing the data
# In[16]:
sm = SMOTE(random_state = 2)
df_X, df_Y = sm.fit_sample(dfX, dfY.ravel())
df_Y = pd.DataFrame(df_Y, columns = ['sex_b'])
# In[17]:
sns.countplot(x ='sex_b', data = df_Y)
# #### Lazy predict classification
# In[18]:
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y,test_size=.33,random_state =123)
clf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)
models,predictions = clf.fit(X_train, X_test, y_train, y_test)
models
# ## DBSCAN (Density-Based Spatial Clustering of Applications with Noise)
# #### This is a clustering algorithm (an alternative to K-Means) that clusters points together and identifies any points not belonging to a cluster as outliers. It’s like K-means, except the number of clusters does not need to be specified in advance.
# #### The method, step-by-step:
# #### Randomly select a point not already assigned to a cluster or designated as an outlier. Determine if it’s a core point by seeing if there are at least min_samples points around it within epsilon distance.
# #### Create a cluster of this core point and all points within epsilon distance of it (all directly reachable points).
# #### Find all points that are within epsilon distance of each point in the cluster and add them to the cluster. Find all points that are within epsilon distance of all newly added points and add these to the cluster. Rinse and repeat. (i.e. perform “neighborhood jumps” to find all density-reachable points and add them to the cluster).
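# #### A minimal toy sketch (illustrative, not the sensor data used here): DBSCAN labels any
# #### point it cannot assign to a cluster as -1, which is exactly what the filtering below relies on.
# from sklearn.cluster import DBSCAN
# import numpy as np
# toy = np.array([[0.0, 0.0], [0.1, 0.1], [0.2, 0.0], [10.0, 10.0]])
# DBSCAN(eps=0.5, min_samples=2).fit_predict(toy)  # -> array([ 0,  0,  0, -1])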
# ### Sklearn Implementation of DBSCAN:
# In[19]:
outlier_detection = DBSCAN(
eps = .2,
metric='euclidean',
min_samples = 5,
n_jobs = -1)
clusters = outlier_detection.fit_predict(dfX)
cmap = cm.get_cmap('Set1')
# #### DBSCAN outputs an array of cluster labels, where -1 indicates an outlier. Below, I visualize the outputted outliers in red by plotting two variables.
# In[20]:
df.plot.scatter(x='time',y='acc_vertical', c=clusters, cmap=cmap,
colorbar = False)
plt.show()
# In[21]:
# fig = go.Figure(data=go.Scatter(x=df['time'],
# y=df['acc_vertical'],
# mode='markers',
# marker_color=clusters,
# text=clusters)) # hover text goes here
# fig.update_layout(title='Scatter Plot to identify the outliers')
# fig.show()
# In[22]:
import plotly.express as px
fig = px.scatter(df, x="time", y="acc_vertical", color=clusters,
hover_data=['time'])
fig.show()
fig = px.scatter(df[clusters==-1], x="time", y="acc_vertical",
hover_data=['time'])
fig.show()
# In[23]:
outliers = np.where(clusters==-1)
df_X_db = df_X.drop(list(outliers[0]))
df_Y_db = df_Y.drop(list(outliers[0]))
df_dbScan = result = pd.concat([df_X_db, df_Y_db], axis=1, sort=False)
df_dbScan.to_csv(r'Filtered_DBSCAN.csv', index=False, header=True)
print(df_dbScan.head())
# In[24]:
sns.countplot(x ='sex_b', data = df_dbScan)
fig = px.histogram(df_dbScan, x="sex_b", color="sex_b")
fig.update_layout(barmode='group')
fig.show()
# In[25]:
X_train, X_test, y_train, y_test = train_test_split(df_X_db, df_Y_db,test_size=.33,random_state =123)
clf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)
models,predictions = clf.fit(X_train, X_test, y_train, y_test)
models
# ## Isolation Forests
# #### Randomly select a feature and randomly select a value for that feature within its range.
# #### If the observation’s feature value falls above (below) the selected value, then this value becomes the new min (max) of that feature’s range.
# #### Check if at least one other observation has values in the range of each feature in the dataset, where some ranges were altered via step 2. If no, then the observation is isolated.
# #### Repeat steps 1–3 until the observation is isolated. The number of times you had to go through these steps is the isolation number. The lower the number, the more anomalous the observation is.
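# #### A minimal toy sketch (illustrative, not the sensor data used here): IsolationForest.predict
# #### returns -1 for anomalies and 1 for normal points, and decision_function gives lower scores
# #### to more anomalous observations.
# from sklearn.ensemble import IsolationForest
# import numpy as np
# toy = np.array([[0.0], [0.1], [0.2], [0.1], [10.0]])
# IsolationForest(contamination=0.2, random_state=0).fit(toy).predict(toy)  # -> array([ 1,  1,  1,  1, -1])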
# ## Sklearn Implementation of Isolation Forests:
# In[26]:
rs=np.random.RandomState(0)
clf = IsolationForest(max_samples=100,random_state=rs, contamination=.1)
clf.fit(dfX)
if_scores = clf.decision_function(dfX)
if_anomalies=clf.predict(dfX)
if_anomalies = pd.Series(if_anomalies)  # api: pandas.Series
import json
import time, datetime
import csv
import os
import preProcess
import dataVis
from pandas import DataFrame
from pandas import TimeGrouper
import pandas as pd
from matplotlib import pyplot
def readWholeCSV(docName):
    Folder_Path = r'/Users/siqiyaoyao/git/python3/fnirs/fnirsAnalysis/dataset/' + docName  # folder to concatenate, with its full path; avoid Chinese characters in the path
    # change the current working directory
    os.chdir(Folder_Path)
    # store all the file names under this folder in a list
file_list = os.listdir()
file_list.remove('.DS_Store')
print('doc list:', file_list)
return file_list
def getWholeParticipents(fileList):
allGroupData =[]
allDataSet =[]
for file in fileList:
seriesGroups,dataSet = dataVis.readDataFromcsv(file)
allGroupData.append(seriesGroups)
allDataSet.append(dataSet)
return allGroupData,allDataSet
def processPergroup(allSets,allParicipants):
l = len(allParicipants)
participants_label_arr = []
g1 = []
g2 = []
g3 =[]
g4 = []
g5 = []
g6 = []
g7 = []
g8 =[]
for i in range(0,l):# debug 1
indexList = dataVis.findGroupIndex(allParicipants[i].label)
group_label = dataVis.groupData(indexList,allSets[i]) # 0-19 0 group 1 time 2-19 channel
#normalizeData_devide(group_label)
g1.append(group_label[0]) # 0-19
g2.append(group_label[1])
g3.append(group_label[2])
g4.append(group_label[3])
g5.append(group_label[4])
g6.append(group_label[5])
g7.append(group_label[6])
g8.append(group_label[7])
#participants_label_arr.append(group_label)
return g1,g2,g3,g4,g5,g6,g7,g8
def normalizeData_devide(groups):
labelArr =[]
for index,data in enumerate(groups): # 8 groups, data [0] groupindex [1] time [2:19] channel
n = len(data)
groupIndex = data[0].mean()
print(n,data[0].mean())
# def normalizeData_score(data):
def plotPerGroup(group):
labels1 = DataFrame()
    labels2 = DataFrame()  # api: pandas.DataFrame
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
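# Example (illustrative): get_upcast_box(pd.Index, pd.Series([1])) returns Series, and
# get_upcast_box(pd.Series, pd.DataFrame({'a': [1]})) returns DataFrame, following the
# priority order DataFrame > Series > Index implemented above.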
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
        tm.assert_index_equal(result, expected)  # api: pandas.util.testing.assert_index_equal
import pandas as pd
from models.JensModel import JensModel
from utils.Recording import Recording
def jens_windowize(dataCollection, window_size):
"""
in the working version expects dataframe with these columns:
['IMU-BACK-accX', 'IMU-BACK-accY', 'IMU-BACK-accZ',
'IMU-BACK-Quaternion1', 'IMU-BACK-Quaternion2', 'IMU-BACK-Quaternion3',
'IMU-BACK-Quaternion4', 'IMU-RLA-accX', 'IMU-RLA-accY', 'IMU-RLA-accZ',
'IMU-RLA-Quaternion1', 'IMU-RLA-Quaternion2', 'IMU-RLA-Quaternion3',
'IMU-RLA-Quaternion4', 'IMU-LLA-accX', 'IMU-LLA-accY', 'IMU-LLA-accZ',
'IMU-LLA-Quaternion1', 'IMU-LLA-Quaternion2', 'IMU-LLA-Quaternion3',
'IMU-LLA-Quaternion4', 'IMU-L-SHOE-EuX', 'IMU-L-SHOE-EuY',
'IMU-L-SHOE-EuZ', 'IMU-L-SHOE-Nav_Ax', 'IMU-L-SHOE-Nav_Ay',
'IMU-L-SHOE-Nav_Az', 'IMU-L-SHOE-Body_Ax', 'IMU-L-SHOE-Body_Ay',
'IMU-L-SHOE-Body_Az', 'IMU-L-SHOE-AngVelBodyFrameX',
'IMU-L-SHOE-AngVelBodyFrameY', 'IMU-L-SHOE-AngVelBodyFrameZ',
'IMU-L-SHOE-AngVelNavFrameX', 'IMU-L-SHOE-AngVelNavFrameY',
'IMU-L-SHOE-AngVelNavFrameZ', 'IMU-R-SHOE-EuX', 'IMU-R-SHOE-EuY',
'IMU-R-SHOE-EuZ', 'IMU-R-SHOE-Nav_Ax', 'IMU-R-SHOE-Nav_Ay',
'IMU-R-SHOE-Nav_Az', 'IMU-R-SHOE-Body_Ax', 'IMU-R-SHOE-Body_Ay',
'IMU-R-SHOE-Body_Az', 'IMU-R-SHOE-AngVelBodyFrameX',
'IMU-R-SHOE-AngVelBodyFrameY', 'IMU-R-SHOE-AngVelBodyFrameZ',
'IMU-R-SHOE-AngVelNavFrameX', 'IMU-R-SHOE-AngVelNavFrameY',
'IMU-R-SHOE-AngVelNavFrameZ', 'Locomotion', 'HL_Activity',
'file_index']
test subset:
['IMU-BACK-accX', 'IMU-BACK-accY', 'IMU-BACK-accZ', 'Locomotion', 'HL_Activity', 'file_index']
"""
# print(dataCollection.columns)
HL_Activity_i = dataCollection.columns.get_loc("HL_Activity")
# convert the data frame to numpy array
data = dataCollection.to_numpy()
# segment the data
n = len(data)
X = []
y = []
start = 0
end = 0
while start + window_size - 1 < n:
end = start + window_size - 1
        # does the planned window have the same activity at its beginning and end, and come from the same file at both ends?
        # (note: the activity could still change back and forth in between)
if (
data[start][HL_Activity_i] == data[end][HL_Activity_i]
and data[start][-1] == data[end][-1] # the last index is the file index
):
# print(data[start:(end+1),0:(HL_Activity_i)])
# first part time axis, second part sensor axis -> get window
X.append(
data[start : (end + 1), 0 : (HL_Activity_i - 1)] # data[timeaxis/row, featureaxis/column] data[1, 2] gives specific value, a:b gives you an interval
) # slice before locomotion
y.append(data[start][HL_Activity_i]) # the first data point is enough
start += window_size // 2 # 50% overlap!!!!!!!!!
            # if the window spans different activities or different files, find the next valid start point
            # if the remaining segment is smaller than the window size -> skip it (is the window small enough?)
else:
while start + window_size - 1 < n:
# find the switch point -> the next start point
# different file check missing! will come here again (little overhead)
if data[start][HL_Activity_i] != data[start + 1][HL_Activity_i]:
break
start += 1
start += 1 # dirty fix for the missing 'different file' check?
return X, y
# same example_recordings (first recording duplicated) from test_our_jens_windowize.py
# same window expectation!!!!
example_recordings = [
Recording(
# 30 timesteps, 3 features
sensor_frame = pd.DataFrame([[1,11,111],[2,22,222],[3,33,333],[4,44,444],[5,55,555], [6,66,666], [7,77,777], [8,88,888], [9,99,999], [10,100,1000], [11, 111, 1111], [12, 222, 2222], [13, 333, 3333], [14, 444, 4444], [15, 555, 5555], [16, 666, 6666], [17, 777, 7777], [18, 888, 8888], [19, 999, 9999], [20, 1000, 10000], [21, 111, 1111], [22, 222, 2222], [23, 333, 3333], [24, 444, 4444], [25, 555, 5555], [26, 666, 6666], [27, 777, 7777], [28, 888, 8888], [29, 999, 9999], [30, 1000, 10000]]),
time_frame = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
activities = pd.Series([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]),
subject = "Bruder Jakob"
),
Recording(
# 30 timesteps, 3 features
sensor_frame = pd.DataFrame([[1,11,111],[2,22,222],[3,33,333],[4,44,444],[5,55,555], [6,66,666], [7,77,777], [8,88,888], [9,99,999], [10,100,1000], [11, 111, 1111], [12, 222, 2222], [13, 333, 3333], [14, 444, 4444], [15, 555, 5555], [16, 666, 6666], [17, 777, 7777], [18, 888, 8888], [19, 999, 9999], [20, 1000, 10000], [21, 111, 1111], [22, 222, 2222], [23, 333, 3333], [24, 444, 4444], [25, 555, 5555], [26, 666, 6666], [27, 777, 7777], [28, 888, 8888], [29, 999, 9999], [30, 1000, 10000]]),
time_frame = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
activities = pd.Series([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]),
subject = "Bruder Jakob 2"
),
Recording(
# only one timestep
sensor_frame = pd.DataFrame([[1, 11, 111]]),
time_frame = pd.Series([1]),
activities =
|
pd.Series([0])
|
pandas.Series
|
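# A standalone sketch of the 50%-overlap windowing idea from jens_windowize above,
# on a toy frame; the column names and values here are illustrative only.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'feat': np.arange(10),
    'HL_Activity': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
    'file_index': [0] * 10,
})

window_size = 4
data = toy.to_numpy()
act_i = toy.columns.get_loc('HL_Activity')

windows, labels = [], []
start = 0
while start + window_size - 1 < len(data):
    end = start + window_size - 1
    # keep the window only if activity and file index match at both ends
    if data[start][act_i] == data[end][act_i] and data[start][-1] == data[end][-1]:
        windows.append(data[start:end + 1, :act_i])
        labels.append(data[start][act_i])
    # simplification: always advance by half a window, instead of searching
    # for the activity switch point as the original does on rejected windows
    start += window_size // 2
print(len(windows), labels)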
import __main__ as main
import sys
import pandas as pd
if not hasattr(main, '__file__'):
argv = ['a',
'data/processed/census/oa_tile_reference.csv',
'data/raw/census/Eng_Wal_OA_Mid_Pop.csv',
'data/raw/census/OA_to_DZ.csv',
'data/raw/census/simd2020_withinds.csv',
'data/raw/census/NI_Mid_Pop.csv',
"data/processed/census/tile_imd.csv"]
else:
argv = sys.argv
tiles = pd.read_csv(argv[1])
england_oa =
|
pd.read_csv(argv[2])
|
pandas.read_csv
|
import os
import pandas as pd
import csv
def process(path):
files = os.listdir(path)
s = []
neg, pos, neu = [], [], []
label = []
    for file in files: # iterate over the files in the folder
f = pd.read_csv(path+'/'+file, header=0, index_col=0)
s.extend(list(f['review']))
label.extend(list(f['rating']))
for i in range(len(f)):
if int(f['rating'][i]) < 3:
neg.append(f['review'][i])
elif int(f['rating'][i]) > 3:
pos.append(f['review'][i])
else:
neu.append(f['review'][i])
pd.DataFrame({'review': neg}).to_csv("../data/neg.csv", encoding='utf-8')
pd.DataFrame({'review': pos}).to_csv("../data/pos.csv", encoding='utf-8')
|
pd.DataFrame({'review': neu})
|
pandas.DataFrame
|
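# A hedged sketch of a vectorized alternative to the row loop in process() above;
# column names follow the snippet, the data and output path are illustrative only.
import pandas as pd

f = pd.DataFrame({'rating': [1, 5, 3, 2, 4], 'review': list('abcde')})

rating = pd.to_numeric(f['rating'])
neg = f.loc[rating < 3, 'review']
pos = f.loc[rating > 3, 'review']
neu = f.loc[rating == 3, 'review']

neg.to_frame(name='review').to_csv('neg_example.csv', encoding='utf-8')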
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import pyprojroot
import scipy.stats
def convert_seg_error_rate_pct(df):
df.avg_segment_error_rate = df.avg_segment_error_rate * 100
return df
RESULTS_ROOT = pyprojroot.here() / 'results'
segmentation_map = {
'ground_truth': 'segmented audio, manually cleaned',
'resegment': 'segmented audio, not cleaned',
'semi-automated-cleaning': 'segmented audio, semi-automated cleaning',
'not-cleaned': 'segmented audio, not cleaned',
'manually-cleaned': 'segmented audio, manually cleaned'
}
hvc_dfs = []
csv_filename = 'segment_error_across_birds.hvc.csv'
for species in ('Bengalese_Finches', 'Canaries'):
species_csv = RESULTS_ROOT / f'{species}/hvc/{csv_filename}'
df = pd.read_csv(species_csv)
df['Model'] = 'SVM'
df['Input to model'] = df['segmentation'].map(segmentation_map)
df['Species'] = species
hvc_dfs.append(df)
hvc_df = pd.concat(hvc_dfs)
curve_df = []
for species in ('Bengalese_Finches', 'Canaries'):
LEARNCURVE_RESULTS_ROOT = pyprojroot.here() / 'results' / species / 'learncurve'
error_csv_path = LEARNCURVE_RESULTS_ROOT.joinpath('error_across_birds_with_cleanup.csv')
df = pd.read_csv(error_csv_path)
df = df[df.animal_id.isin(hvc_df.animal_id.unique())]
df['Model'] = 'TweetyNet'
df['Input to model'] = 'spectrogram'
df['Species'] = species
curve_df.append(df)
del df
curve_df = pd.concat(curve_df)
CLEANUP = 'min_segment_dur_majority_vote'
curve_df = curve_df[
curve_df.cleanup == CLEANUP
]
all_df =
|
pd.concat([hvc_df, curve_df])
|
pandas.concat
|
import numpy as np
import cv2
from astropy.io import fits
import pandas as pd
import io
import requests
from bs4 import BeautifulSoup
import warnings
# The aia image size is fixed by the size of the detector. For AIA raw data, this has no reason to change.
aia_image_size = 4096
def scale_rotate(image, angle=0, scale_factor=1, reference_pixel=None):
"""
Perform scaled rotation with opencv. About 20 times faster than with Sunpy & scikit/skimage warp methods.
The output is a padded image that holds the entire rescaled,rotated image, recentered around the reference pixel.
    Positive-angle rotation rotates the image clockwise if the array origin (0,0) maps to the bottom left of the image,
    and counterclockwise if the array origin maps to the top left of the image.
:param image: Numpy 2D array
:param angle: rotation angle in degrees. Positive-angle rotation rotates image clockwise if the array origin (0,0)
    maps to the bottom left of the image, and counterclockwise if the array origin maps to the top left of the image.
:param scale_factor: ratio of the wavelength-dependent pixel scale over the target scale of 0.6 arcsec
:param reference_pixel: tuple of (x, y) coordinate. Given as (x, y) = (col, row) and not (row, col).
:return: padded scaled and rotated image
"""
array_center = (np.array(image.shape)[::-1] - 1) / 2.0
if reference_pixel is None:
reference_pixel = array_center
# convert angle to radian
angler = angle * np.pi / 180
# Get basic rotation matrix to calculate initial padding extent
rmatrix = np.matrix([[np.cos(angler), -np.sin(angler)],
[np.sin(angler), np.cos(angler)]])
extent = np.max(np.abs(np.vstack((image.shape * rmatrix,
image.shape * rmatrix.T))), axis=0)
# Calculate the needed padding or unpadding
diff = np.asarray(np.ceil((extent - image.shape) / 2), dtype=int).ravel()
diff2 = np.max(np.abs(reference_pixel - array_center)) + 1
# Pad the image array
pad_x = int(np.ceil(np.max((diff[1], 0)) + diff2))
pad_y = int(np.ceil(np.max((diff[0], 0)) + diff2))
padded_reference_pixel = reference_pixel + np.array([pad_x, pad_y])
# padded_image = np.pad(image, ((pad_y, pad_y), (pad_x, pad_x)), mode='constant', constant_values=(0, 0))
padded_image = aia_pad(image, pad_x, pad_y)
padded_array_center = (np.array(padded_image.shape)[::-1] - 1) / 2.0
# Get scaled rotation matrix accounting for padding
rmatrix_cv = cv2.getRotationMatrix2D((padded_reference_pixel[0], padded_reference_pixel[1]), angle, scale_factor)
# Adding extra shift to recenter:
# move image so the reference pixel aligns with the center of the padded array
shift = padded_array_center - padded_reference_pixel
rmatrix_cv[0, 2] += shift[0]
rmatrix_cv[1, 2] += shift[1]
# Do the scaled rotation with opencv. ~20x faster than Sunpy's map.rotate()
rotated_image = cv2.warpAffine(padded_image, rmatrix_cv, padded_image.shape, cv2.INTER_CUBIC)
return rotated_image
def aiaprep(fitsfile, cropsize=aia_image_size):
hdul = fits.open(fitsfile)
hdul[1].verify('silentfix')
header = hdul[1].header
data = hdul[1].data.astype(np.float64)
data /= header['EXPTIME']
# Target scale is 0.6 arcsec/px
target_scale = 0.6
scale_factor = header['CDELT1'] / target_scale
# Center of rotation at reference pixel converted to a coordinate origin at 0
reference_pixel = [header['CRPIX1'] - 1, header['CRPIX2'] - 1]
# Rotation angle with openCV uses coordinate origin at top-left corner. For solar images in numpy we need to invert the angle.
angle = -header['CROTA2']
# Run scaled rotation. The output will be a rotated, rescaled, padded array.
prepdata = scale_rotate(data, angle=angle, scale_factor=scale_factor, reference_pixel=reference_pixel)
prepdata[prepdata < 0] = 0
if cropsize is not None:
center = ((np.array(prepdata.shape) - 1) / 2.0).astype(int)
half_size = int(cropsize / 2)
prepdata = prepdata[center[1] - half_size:center[1] + half_size, center[0] - half_size:center[0] + half_size]
return prepdata
# Alternate padding method. On AIA, it is ~6x faster than numpy.pad used in Sunpy's aiaprep
def aia_pad(image, pad_x, pad_y):
newsize = [image.shape[0]+2*pad_y, image.shape[1]+2*pad_x]
pimage = np.empty(newsize)
pimage[0:pad_y,:] = 0
pimage[:,0:pad_x]=0
pimage[pad_y+image.shape[0]:, :] = 0
pimage[:, pad_x+image.shape[1]:] = 0
pimage[pad_y:image.shape[0]+pad_y, pad_x:image.shape[1]+pad_x] = image
return pimage
class AIAEffectiveArea:
def __init__(self, url='https://hesperia.gsfc.nasa.gov/ssw/sdo/aia/response/', filename=None):
'''
:param url: the online location of the response table
:param filename: string optional location of a local response table to read, overrides url
Usage
aia_effective_area = AIAEffectiveArea()
effective_area_ratio = aia_effective_area.effective_area_ratio(171, '2010-10-24 15:00:00')
'''
# Local input possible else fetch response table file from GSFC mirror of SSW
if filename is not None:
response_table = filename
else:
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
all_versions = [node.get('href') for node in soup.find_all('a') if
node.get('href').endswith('_response_table.txt')]
latest_table_url = url + sorted([table_files for table_files in all_versions if
table_files.startswith('aia_V')])[-1]
tbl = requests.get(latest_table_url).content
response_table = io.StringIO(tbl.decode('utf-8'))
# Read in response table
self.response_table = pd.read_csv(response_table, sep='\s+', parse_dates=[1], infer_datetime_format=True, index_col=1)
def effective_area(self, wavelength, time):
'''
:param wavelength: float wavelength of the aia target image
:param time: string in a format to be read by pandas.to_datetime; the time of the aia target image
:return: the effective area of the AIA detector interpolated to the target_time
'''
eff_area_series = self._parse_series(wavelength.value)
if (
|
pd.to_datetime(time)
|
pandas.to_datetime
|
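# A minimal sketch of interpolating a date-indexed series to a target time, the
# operation effective_area() above builds on; the table values are made up.
import pandas as pd

idx = pd.to_datetime(['2010-01-01', '2011-01-01', '2012-01-01'])
eff_area = pd.Series([1.00, 0.90, 0.85], index=idx)

target = pd.to_datetime('2011-07-01')
interpolated = (
    eff_area.reindex(eff_area.index.union([target]))
            .interpolate(method='time')
            .loc[target]
)
print(interpolated)  # time-weighted value between 0.90 and 0.85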
import datetime
import calendar
import numpy as np
import pandas as pd
from scipy import stats
from collections import defaultdict
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import (StandardScaler, MinMaxScaler,
RobustScaler, MaxAbsScaler,
PowerTransformer, QuantileTransformer,
OneHotEncoder, OrdinalEncoder,
KBinsDiscretizer)
from pyod.models.knn import KNN
from pyod.models.iforest import IForest
from pyod.models.pca import PCA as PCA_RO
from sklearn.covariance import EllipticEnvelope
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
# from lightgbm import LGBMClassifier
from sklearn.linear_model import LogisticRegression
class Handle_Datatype(BaseEstimator,TransformerMixin):
def __init__(self,target,ml_usecase,categorical_features=[],numerical_features=[],time_features=[],features_todrop=[],display_types=True):
self.target = target
self.ml_usecase= ml_usecase
self.categorical_features =categorical_features
self.numerical_features = numerical_features
self.time_features =time_features
self.features_todrop = features_todrop
self.display_types = display_types
def fit(self,dataset,y=None):
data = dataset.copy()
# drop any columns that were asked to drop
data.drop(columns=self.features_todrop,errors='ignore',inplace=True)
# if there are inf or -inf then replace them with NaN
data.replace([np.inf,-np.inf],np.NaN,inplace=True)
# also make sure that all the column names are string
data.columns = [str(i) for i in data.columns]
# try to clean columns names
data.columns = data.columns.str.replace(r'[\,\}\{\]\[\:\"\']','')
# try to convert categoric columns into numerical if possible
for i in data.select_dtypes(include=['object']).columns:
try:
data[i] = data[i].astype('int64')
except:
None
# convert pandas bool and categorical into categorical datatype
for i in data.select_dtypes(include=['bool', 'category']).columns:
data[i] = data[i].astype('object')
        # with the csv format, if there is any null in a column that was int -> pandas will read it as float.
for i in data.select_dtypes(include=['float64']).columns:
na_count = sum(data[i].isna())
            # count how many values have a decimal part
            count_float = np.nansum([ False if r.is_integer() else True for r in data[i]])
            # total count of decimal values
            count_float = count_float - na_count # reduce it because NaN is counted as a float value
            # now if there isn't any decimal value, unique levels are at most 20 and there are NaNs, then convert the column to object
if ( (count_float == 0) & (data[i].nunique() <=20) & (na_count>0) ):
data[i] = data[i].astype('object')
for i in data.select_dtypes(include=['float64']).columns:
if data[i].nunique()==2:
data[i]= data[i].apply(str)
for i in data.select_dtypes(include=['object']).drop(self.target,axis=1,errors='ignore').columns:
try:
data[i] = pd.to_datetime(data[i], infer_datetime_format=True, utc=False, errors='raise')
except:
continue
        # now, in case we were given any specific column dtypes in advance, we override those
for i in self.categorical_features:
try:
data[i]=data[i].apply(str)
except:
data[i]=dataset[i].apply(str)
for i in self.numerical_features:
try:
data[i]=data[i].astype('float64')
except:
data[i]=dataset[i].astype('float64')
for i in self.time_features:
try:
data[i] =
|
pd.to_datetime(data[i], infer_datetime_format=True, utc=False, errors='raise')
|
pandas.to_datetime
|
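# A small sketch of the datetime-coercion pattern used in fit() above:
# errors='raise' lets the except-branch skip non-date columns, whereas
# errors='coerce' would silently turn bad values into NaT. Data is illustrative.
import pandas as pd

col = pd.Series(['2013-03-05', '2014-07-14', 'not a date'])

try:
    parsed = pd.to_datetime(col, errors='raise')
except (ValueError, TypeError):
    parsed = col  # leave the column untouched, as the fit() loop does

coerced = pd.to_datetime(col, errors='coerce')  # third value becomes NaT
print(parsed.dtype, coerced.dtype)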
from __future__ import print_function
import unittest
from unittest import mock
import datetime
import six
import warnings
from collections import defaultdict
import pandas as pd
import numpy as np
from . import utils
from .. import test_utils
from dataprofiler.profilers import DateTimeColumn
from dataprofiler.profilers.profiler_options import DateTimeOptions
# This is taken from: https://github.com/rlworkgroup/dowel/pull/36/files
# undo when cpython#4800 is merged.
unittest.case._AssertWarnsContext.__enter__ = test_utils.patched_assert_warns
class TestDateTimeColumnProfiler(unittest.TestCase):
def setUp(self):
utils.set_seed(seed=0)
@staticmethod
def _generate_datetime_data(date_format):
gen_data = []
for i in range(50):
start_date = pd.Timestamp(1950, 7, 14)
end_date = pd.Timestamp(2020, 7, 14)
date_sample = utils.generate_random_date_sample(
start_date, end_date, [date_format]
)
gen_data.append(date_sample)
return pd.Series(gen_data)
def _test_datetime_detection_helper(self, date_formats):
for date_format in date_formats:
# generate a few samples for each date format
gen_data = self._generate_datetime_data(date_format)
# Test to see if the format and col type is detected correctly.
datetime_profile = DateTimeColumn(gen_data.name)
datetime_profile.update(gen_data)
self.assertEqual(date_format, datetime_profile.date_formats[0])
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = DateTimeColumn(data.name)
profiler.update(data)
profiler.update(data) # intentional to validate no changes if empty
self.assertEqual(profiler.match_count, 0)
self.assertIsNone(profiler.min)
self.assertIsNone(profiler.max)
self.assertListEqual([], profiler.date_formats)
self.assertIsNone(profiler.data_type_ratio)
def test_profiled_date_time_formats(self):
"""
Checks whether the profiler properly determines all datetime formats.
:return:
"""
date_formats_1 = [
"%Y-%m-%d %H:%M:%S", # 2013-03-5 15:43:30
"%Y-%m-%dT%H:%M:%S", # 2013-03-6T15:43:30
"%Y-%m-%dT%H:%M:%S.%fZ", # 2013-03-6T15:43:30.123456Z
"%m/%d/%y %H:%M", # 03/10/13 15:43
"%m/%d/%Y %H:%M", # 3/8/2013 15:43
"%Y%m%dT%H%M%S", # 2013036T154330
"%H:%M:%S.%f", # 05:46:30.258509
]
df_1 = pd.Series([], dtype=object)
for date_format in date_formats_1:
# generate a few samples for each date format
df_1 = pd.concat(
[df_1, self._generate_datetime_data(date_format)]
)
date_formats_2 = [
"%Y-%m-%d", # 2013-03-7
"%m/%d/%Y", # 3/8/2013
"%m/%d/%y", # 03/10/13
"%B %d, %Y", # March 9, 2013
"%b %d, %Y", # Mar 11, 2013
"%d%b%y", # 12Mar13
"%b-%d-%y", # Mar-13-13
"%m%d%Y", # 03142013
]
df_2 = pd.Series([], dtype=object)
for date_format in date_formats_2:
# generate a few samples for each date format
df_2 = pd.concat(
[df_2, self._generate_datetime_data(date_format)]
)
date_formats_all = date_formats_1 + date_formats_2
df_all = pd.concat([df_1, df_2])
datetime_profile = DateTimeColumn(df_all.name)
datetime_profile.update(df_all)
six.assertCountEqual(self,
date_formats_all,
set(datetime_profile.date_formats))
# Test chunks
datetime_profile = DateTimeColumn(df_1.name)
datetime_profile.update(df_1)
six.assertCountEqual(self,
date_formats_1,
set(datetime_profile.date_formats))
datetime_profile.update(df_2)
six.assertCountEqual(self,
date_formats_all,
datetime_profile.date_formats)
def test_profiled_min(self):
def date_linspace(start, end, steps):
delta = (end - start) / steps
increments = list(range(0, steps)) * np.array([delta] * steps)
return start + increments
df = pd.core.series.Series(
date_linspace(datetime.datetime.min, datetime.datetime.max, 11)
)
df = df.apply(
lambda x: x - datetime.timedelta(microseconds=x.microsecond)
).apply(str)
datetime_profile = DateTimeColumn(df[1:].name)
datetime_profile.update(df[1:])
self.assertEqual(datetime_profile.min, df.iloc[1])
datetime_profile.update(df)
self.assertEqual(datetime_profile.min, df.iloc[0])
datetime_profile.update(pd.Series([np.nan, df.iloc[3]]))
self.assertEqual(datetime_profile.min, df.iloc[0])
datetime_profile.update(df[1:2]) # only way to keep as df
self.assertEqual(datetime_profile.min, df.iloc[0])
def test_profiled_max(self):
def date_linspace(start, end, steps):
delta = (end - start) / steps
increments = list(range(0, steps)) * np.array([delta] * steps)
return start + increments
df = pd.core.series.Series(
date_linspace(datetime.datetime.min, datetime.datetime.max, 11)
)
df = df.apply(
lambda x: x - datetime.timedelta(microseconds=x.microsecond)
).apply(str)
datetime_profile = DateTimeColumn(df[:-1].name)
datetime_profile.update(df[:-1])
self.assertEqual(datetime_profile.max, df.iloc[-2])
datetime_profile.update(df)
self.assertEqual(datetime_profile.max, df.iloc[-1])
datetime_profile.update(pd.Series([np.nan, df.iloc[3]]))
self.assertEqual(datetime_profile.max, df.iloc[-1])
datetime_profile.update(df[1:2]) # only way to keep as df
self.assertEqual(datetime_profile.max, df.iloc[-1])
def test_date_time_detection(self):
"""
Tests if get_datetime_params is able to detect the date time cols
correctly
:return:
"""
date_formats = [
"%Y-%m-%d %H:%M:%S", # 2013-03-5 15:43:30
"%Y-%m-%dT%H:%M:%S", # 2013-03-6T15:43:30
"%Y-%m-%dT%H:%M:%S.%fZ", # 2013-03-6T15:43:30.123456Z
"%m/%d/%y %H:%M", # 03/10/13 15:43
"%m/%d/%Y %H:%M", # 3/8/2013 15:43
"%Y%m%dT%H%M%S", # 2013036T154330
"%H:%M:%S.%f" # 05:46:30.258509
]
self._test_datetime_detection_helper(date_formats)
def test_date_time_detection_without_time(self):
"""
Tests if get_datetime_params is able to detect the date cols correctly
:return:
"""
date_formats = [
"%Y-%m-%d", # 2013-03-7
"%m/%d/%Y", # 3/8/2013
"%m/%d/%y", # 03/10/13
"%B %d, %Y", # March 9, 2013
"%b %d, %Y", # Mar 11, 2013
"%d%b%y", # 12Mar13
"%b-%d-%y", # Mar-13-13
"%m%d%Y", # 03142013
]
self._test_datetime_detection_helper(date_formats)
def test_data_ratio(self):
data = [
2.5, 12.5, '2013-03-5 15:43:30', 5, '03/10/13 15:43', 'Mar 11, 2013'
]
df = pd.Series(data).apply(str)
profiler = DateTimeColumn(df.name)
self.assertEqual(profiler.data_type_ratio, None)
profiler.update(df)
self.assertEqual(profiler.data_type_ratio, 0.5)
profiler.update(
|
pd.Series([None, '10/20/13', 'nan'])
|
pandas.Series
|
"""
Run modisco
"""
import logging
import matplotlib.pyplot as plt
from argh.decorators import named, arg
import shutil
import pandas as pd
import os
from collections import OrderedDict
from tqdm import tqdm
from pathlib import Path
from bpnet.utils import write_pkl, render_ipynb, remove_exists, add_file_logging, create_tf_session, pd_first_cols
from bpnet.cli.contrib import ContribFile
from bpnet.cli.train import _get_gin_files, log_gin_config
from bpnet.modisco.files import ModiscoFile
from bpnet.utils import write_json, read_json
import gin
import numpy as np
import inspect
filename = inspect.getframeinfo(inspect.currentframe()).filename
this_path = os.path.dirname(os.path.abspath(filename))
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# --------------------------------------------
# load functions for the modisco directory
def load_included_samples(modisco_dir):
return np.load(os.path.join(modisco_dir, "modisco-run.subset-contrib-file.npy"))
def load_ranges(modisco_dir):
modisco_dir = Path(modisco_dir)
included_samples = load_included_samples(modisco_dir)
kwargs = read_json(modisco_dir / "modisco-run.kwargs.json")
d = ContribFile(kwargs["contrib_file"], included_samples)
df = d.get_ranges()
d.close()
return df
def load_contrib_type(modisco_kwargs):
"""Load the contrib_wildcard contribution score
"""
# use the first one as the default
contrib_types = [wildcard.split("/", maxsplit=1)[1]
for wildcard in modisco_kwargs['contrib_wildcard'].split(",")]
    if len(set(contrib_types)) > 1:
contrib_wildcard = modisco_kwargs['contrib_wildcard']
logger.warn(f"contrib_wildcard: {contrib_wildcard} contains multiple contrib_types. "
"Current code can by default only handle a single one.")
contrib_type = contrib_types[0]
return contrib_type
def get_nonredundant_example_idx(ranges, width=200):
"""Get non - overlapping intervals(in the central region)
Args:
ranges: pandas.DataFrame returned by bpnet.cli.modisco.load_ranges
width: central region considered that should not overlap between
any interval
"""
from pybedtools import BedTool
from bpnet.preproc import resize_interval
# 1. resize ranges
ranges['example_idx'] = np.arange(len(ranges)) # make sure
r = ranges[['chrom', 'start', 'end', 'example_idx']] # add also the strand information
if width is not None:
r = resize_interval(r, width, ignore_strand=True)
bt = BedTool.from_dataframe(r)
btm = bt.sort().merge()
df = btm.to_dataframe()
df = df[(df.end - df.start) < width * 2]
r_overlaps = bt.intersect(BedTool.from_dataframe(df), wb=True).to_dataframe()
keep_idx = r_overlaps.drop_duplicates(['score', 'strand', 'thickStart'])['name'].astype(int)
return keep_idx
# --------------------------------------------
@gin.configurable
def modisco_run(output_path, # specified by bpnet_modisco_run
task_names,
contrib_scores,
hypothetical_contribs,
one_hot,
null_per_pos_scores,
# specified by gin-config
workflow=gin.REQUIRED, # TfModiscoWorkflow
report=None): # reports to use
"""
Args:
workflow: TfModiscoWorkflow objects
report: path to the report ipynb
"""
import h5py
modisco_results = workflow(task_names=task_names,
contrib_scores=contrib_scores,
hypothetical_contribs=hypothetical_contribs,
one_hot=one_hot,
null_per_pos_scores=null_per_pos_scores)
# save the results
logger.info(f"Saving modisco file to {output_path}")
grp = h5py.File(output_path)
modisco_results.save_hdf5(grp)
grp.flush()
grp.close()
if report is not None:
if report is not None:
report = os.path.abspath(os.path.expanduser(report))
if not os.path.exists(report):
raise ValueError(f"Report file {report} doesn't exist")
logger.info("Running the report")
# Run the jupyter notebook
report_path = os.path.join(os.path.dirname(output_path), os.path.basename(report))
render_ipynb(report,
report_path,
params=dict(modisco_file=output_path,
modisco_dir=os.path.dirname(output_path)))
logger.info(f"Done rendering the report file: {report_path}")
@named("modisco-run")
@arg('contrib_file',
help='path to the hdf5 file containing contribution scores')
@arg('output_dir',
help='output file directory')
@arg('--null-contrib-file',
help='Path to the null contribution scores')
@arg('--premade',
help='pre-made config file specifying modisco hyper-paramters to use.')
@arg('--config',
help='gin config file path(s) specifying the modisco workflow parameters.'
' Parameters specified here override the --premade parameters. Multiple '
'config files can be separated by comma separation (i.e. --config=file1.gin,file2.gin)')
@arg('--override',
help='semi-colon separated list of additional gin bindings to use')
@arg("--contrib-wildcard",
help="Wildcard of the contribution scores to use for running modisco. For example, */profile/wn computes"
"uses the profile contribution scores for all the tasks (*) using the wn normalization (see bpnet.heads.py)."
"*/counts/pre-act uses the total count contribution scores for all tasks w.r.t. the pre-activation output "
"of prediction heads. Multiple wildcards can be by comma-separating them.")
@arg('--only-task-regions',
help='If specified, only the contribution scores from regions corresponding to the tasks specified '
'in --contrib-wildcard will be used. For example, if dataspec.yml contained Oct4 and Sox2 peaks when '
'generating the contrib_file and `--contrib-wildcard=Oct4/profile/wn`, then modisco will be only ran '
'in the Oct4 peaks. If `--contrib-wildcard=Oct4/profile/wn,Sox2/profile/wn` or `--contrib-wildcard=*/profile/wn`, '
'then peaks of both Sox2 and Oct4 will be used.')
@arg('--filter-npy',
    help='File path to the .npz file containing a boolean one-dimensional numpy array of the same length '
    'as the contrib_file. Modisco will be run on a subset of regions in the contrib_file '
'where this array has value=True.')
@arg('--exclude-chr',
help='Comma-separated list of chromosomes to exclude.')
@arg('--num-workers',
help='number of workers to use in parallel for running modisco')
@arg('--gpu',
help='which gpu to use. Example: gpu=1')
@arg('--memfrac-gpu',
help='what fraction of the GPU memory to use')
@arg('--overwrite',
help='If True, the output files will be overwritten if they already exist.')
def bpnet_modisco_run(contrib_file,
output_dir,
null_contrib_file=None,
premade='modisco-50k',
config=None,
override='',
contrib_wildcard="*/profile/wn", # on which contribution scores to run modisco
only_task_regions=False,
filter_npy=None,
exclude_chr="",
num_workers=10,
gpu=None, # no need to use a gpu by default
memfrac_gpu=0.45,
overwrite=False,
):
"""Run TF-MoDISco on the contribution scores stored in the contribution score file
generated by `bpnet contrib`.
"""
add_file_logging(output_dir, logger, 'modisco-run')
if gpu is not None:
logger.info(f"Using gpu: {gpu}, memory fraction: {memfrac_gpu}")
create_tf_session(gpu, per_process_gpu_memory_fraction=memfrac_gpu)
else:
# Don't use any GPU's
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['MKL_THREADING_LAYER'] = 'GNU'
import modisco
assert '/' in contrib_wildcard
if filter_npy is not None:
filter_npy = os.path.abspath(str(filter_npy))
if config is not None:
config = os.path.abspath(str(config))
# setup output file paths
output_path = os.path.abspath(os.path.join(output_dir, "modisco.h5"))
remove_exists(output_path, overwrite=overwrite)
output_filter_npy = os.path.abspath(os.path.join(output_dir, 'modisco-run.subset-contrib-file.npy'))
remove_exists(output_filter_npy, overwrite=overwrite)
kwargs_json_file = os.path.join(output_dir, "modisco-run.kwargs.json")
remove_exists(kwargs_json_file, overwrite=overwrite)
if config is not None:
config_output_file = os.path.join(output_dir, 'modisco-run.input-config.gin')
remove_exists(config_output_file, overwrite=overwrite)
shutil.copyfile(config, config_output_file)
# save the hyper-parameters
write_json(dict(contrib_file=os.path.abspath(contrib_file),
output_dir=str(output_dir),
null_contrib_file=null_contrib_file,
config=str(config),
override=override,
contrib_wildcard=contrib_wildcard,
only_task_regions=only_task_regions,
filter_npy=str(filter_npy),
exclude_chr=exclude_chr,
num_workers=num_workers,
overwrite=overwrite,
output_filter_npy=output_filter_npy,
gpu=gpu,
memfrac_gpu=memfrac_gpu),
kwargs_json_file)
# setup the gin config using premade, config and override
cli_bindings = [f'num_workers={num_workers}']
gin.parse_config_files_and_bindings(_get_gin_files(premade, config),
bindings=cli_bindings + override.split(";"),
# NOTE: custom files were inserted right after
                                        # the user's config file and before the `override`
# parameters specified at the command-line
skip_unknown=False)
log_gin_config(output_dir, prefix='modisco-run.')
# --------------------------------------------
# load the contribution file
logger.info(f"Loading the contribution file: {contrib_file}")
cf = ContribFile(contrib_file)
tasks = cf.get_tasks()
# figure out subset_tasks
subset_tasks = set()
for w in contrib_wildcard.split(","):
task, head, head_summary = w.split("/")
if task == '*':
subset_tasks = None
else:
if task not in tasks:
raise ValueError(f"task {task} not found in tasks: {tasks}")
subset_tasks.add(task)
if subset_tasks is not None:
subset_tasks = list(subset_tasks)
# --------------------------------------------
# subset the intervals
logger.info(f"Loading ranges")
ranges = cf.get_ranges()
# include all samples at the beginning
include_samples = np.ones(len(cf)).astype(bool)
# --only-task-regions
if only_task_regions:
if subset_tasks is None:
logger.warn("contrib_wildcard contains all tasks (specified by */<head>/<summary>). Not using --only-task-regions")
elif np.all(ranges['interval_from_task'] == ''):
raise ValueError("Contribution file wasn't created from multiple set of peaks. "
"E.g. interval_from_task='' for all ranges. Please disable --only-task-regions")
else:
logger.info(f"Subsetting ranges according to `interval_from_task`")
include_samples = include_samples & ranges['interval_from_task'].isin(subset_tasks).values
logger.info(f"Using {include_samples.sum()} / {len(include_samples)} regions after --only-task-regions subset")
# --exclude-chr
if exclude_chr:
logger.info(f"Excluding chromosomes: {exclude_chr}")
chromosomes = ranges['chr']
include_samples = include_samples & (~
|
pd.Series(chromosomes)
|
pandas.Series
|
import logging, os
from typing import Tuple, Optional
import pandas as pd
import dask
import dask.dataframe as dd
from dask.distributed import Client
from dask.distributed import LocalCluster
from .report_generator import combine_data_and_create_report
def read_bcl2fastq_stats_data_using_dask(x: dict) -> Tuple[list, list, list]:
try:
i = x.get('ConversionResults')
lane_number = i.get('LaneNumber')
total_cluster_raw = i.get('TotalClustersRaw')
total_cluster_pf = i.get('TotalClustersPF')
total_yield = i.get('Yield')
row_l = [{
'Lane': lane_number,
'Total_cluster_raw': total_cluster_raw,
'Total_cluster_pf': total_cluster_pf,
'Total_yield': total_yield}]
row_s = list()
demux_results = i.get('DemuxResults')
for j in demux_results:
sample_id = j.get('SampleId')
sample_name = j.get('SampleName')
index = j.get('IndexMetrics')[0].get('IndexSequence')
num_reads = j.get('NumberReads')
yield_val = j.get('Yield')
perfect_barcodes = j['IndexMetrics'][0]['MismatchCounts']['0']
yield_q30 = 0
qual_score_sum = 0
read_metrics = j.get('ReadMetrics')
for read in read_metrics:
q30_bases = int(read.get('YieldQ30'))
yield_q30 += q30_bases
qual_score = int(read.get('QualityScoreSum'))
qual_score_sum += qual_score
row_s.append({
'Lane': lane_number,
'Sample_ID': sample_id,
'Sample_Name': sample_name,
'Index_seq': index,
'Num_reads': num_reads,
'Perfect_barcode': perfect_barcodes,
'Yield_q30': yield_q30,
'Yield': int(yield_val),
'Qual_score_sum': qual_score_sum})
unknown_df = list()
unknown_entry = x['UnknownBarcodes']
lane_id = unknown_entry.get('Lane')
barcodes = unknown_entry.get('Barcodes')
for barcode, read in barcodes.items():
unknown_df.\
append({
'Lane': lane_id,
'Barcode': barcode,
'Reads': read })
return row_l, row_s, unknown_df
except Exception as e:
        logging.error(e)
raise ValueError(e)
# def get_local_dask_cluster(
# n_workers: Optional[int] = 4,
# threads_per_worker: Optional[int] = 1,
# memory_limit: Optional[str] = '1GB') -> LocalCluster:
# try:
# cluster = \
# LocalCluster(
# n_workers=n_workers,
# processes=True,
# threads_per_worker=threads_per_worker,
# memory_limit=memory_limit)
# return cluster
# except Exception as e:
# logging.error(e)
# raise ValueError(e)
def read_data_via_dask_cluster(
data_path: list,
temp_dir: str,
n_workers: Optional[int] = 4,
threads_per_worker: Optional[int] = 1,
memory_limit: Optional[str] = '1GB') -> \
Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
try:
# client = \
# Client(dask_cluster)
if not os.path.exists(temp_dir):
raise IOError("Path {0} doesn't exists".format(temp_dir))
with LocalCluster(
n_workers=n_workers,
processes=False,
threads_per_worker=threads_per_worker,
local_dir=temp_dir,
memory_limit=memory_limit) as cluster, Client(cluster) as client:
# dask.config.set({'temporary_directory': temp_dir})
df_all = dd.read_json(data_path, orient='columns')
df_all = df_all.compute()
all_pdf = \
df_all.apply(
read_bcl2fastq_stats_data_using_dask,
axis=1,
result_type='expand')
lane_df = \
pd.DataFrame([v[0] for v in all_pdf[0].values])
sum_df = \
lane_df.groupby('Lane').agg(sum)
sum_df = \
sum_df.reset_index()
lane_sample_df = \
pd.DataFrame([w for v in all_pdf[1].values for w in v])
lane_unknown_df = \
|
pd.DataFrame([w for v in all_pdf[2].values for w in v])
|
pandas.DataFrame
|
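# A minimal sketch of the per-lane aggregation performed above: a list of dicts
# becomes a DataFrame, then lane totals are summed. Numbers are placeholders.
import pandas as pd

rows = [
    {'Lane': 1, 'Total_cluster_raw': 100, 'Total_cluster_pf': 90, 'Total_yield': 900},
    {'Lane': 1, 'Total_cluster_raw': 120, 'Total_cluster_pf': 100, 'Total_yield': 1000},
    {'Lane': 2, 'Total_cluster_raw': 80, 'Total_cluster_pf': 70, 'Total_yield': 700},
]

lane_df = pd.DataFrame(rows)
sum_df = lane_df.groupby('Lane').sum().reset_index()
print(sum_df)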
import logging
import numpy as np
import pandas as pd
from pytest import approx
from lenskit.metrics.topn import recall
from lenskit.util.test import demo_recs
from lenskit import topn
_log = logging.getLogger(__name__)
def _test_recall(items, rel, **kwargs):
recs = pd.DataFrame({'item': items})
truth = pd.DataFrame({'item': rel}).set_index('item')
return recall(recs, truth, **kwargs)
def test_recall_empty_zero():
prec = _test_recall([], [1, 3])
assert prec == approx(0)
def test_recall_norel_na():
prec = _test_recall([1, 3], [])
assert prec is None
def test_recall_simple_cases():
prec = _test_recall([1, 3], [1, 3])
assert prec == approx(1.0)
prec = _test_recall([1], [1, 3])
assert prec == approx(0.5)
prec = _test_recall([1, 2, 3, 4], [1, 3])
assert prec == approx(1.0)
prec = _test_recall([1, 2, 3, 4], [1, 3, 5])
assert prec == approx(2.0 / 3)
prec = _test_recall([1, 2, 3, 4], range(5, 10))
assert prec == approx(0.0)
prec = _test_recall([1, 2, 3, 4], range(4, 9))
assert prec == approx(0.2)
def test_recall_series():
prec = _test_recall(pd.Series([1, 3]), pd.Series([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3]), pd.Series([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Series(range(4, 9)))
assert prec == approx(0.2)
def test_recall_series_set():
prec = _test_recall(pd.Series([1, 2, 3, 4]), [1, 3, 5, 7])
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), range(4, 9))
assert prec == approx(0.2)
def test_recall_series_index():
prec = _test_recall(pd.Series([1, 3]), pd.Index([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Index([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Index(range(4, 9)))
assert prec == approx(0.2)
def test_recall_series_array():
prec = _test_recall(pd.Series([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3, 4]), np.array([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(
|
pd.Series([1, 2, 3, 4])
|
pandas.Series
|
"""Main class and helper functions.
"""
import os
from enum import Enum
from collections import OrderedDict
from functools import reduce
from pathlib import Path
from typing import Any, Union, Optional
from typing import Iterable, Sized, Sequence, Mapping, MutableMapping
from typing import Tuple, List, Dict, KeysView
from copy import deepcopy
import numpy as np
from numpy import ma
import pandas as pd
from numpy.lib.recfunctions import rec_drop_fields
from pandas.core.index import RangeIndex
from pandas.api.types import is_string_dtype, is_categorical
from scipy import sparse
from scipy.sparse import issparse
from scipy.sparse.sputils import IndexMixin
from natsort import natsorted
# try importing zarr
try:
from zarr.core import Array as ZarrArray
except ImportError:
class ZarrArray:
@staticmethod
def __rep__():
return 'mock zarr.core.Array'
# try importing zappy
try:
from zappy.base import ZappyArray
except ImportError:
class ZappyArray:
@staticmethod
def __rep__():
return 'mock zappy.base.ZappyArray'
from . import h5py
from .layers import AnnDataLayers
from . import utils
from .utils import Index, get_n_items_idx
from .logging import anndata_logger as logger
from .compat import PathLike
class StorageType(Enum):
Array = np.ndarray
Masked = ma.MaskedArray
Sparse = sparse.spmatrix
ZarrArry = ZarrArray
ZappyArry = ZappyArray
@classmethod
def classes(cls):
print(ZarrArray)
return tuple(c.value for c in cls.__members__.values())
class BoundRecArr(np.recarray):
"""A :class:`numpy.recarray` to which fields can be added using ``.['key']``.
To enable this, it is bound to a instance of AnnData.
"""
_attr_choices = ['obsm', 'varm']
def __new__(cls, input_array: np.ndarray, parent: Any, attr: str):
"""
Parameters
----------
input_array
A (structured) numpy array.
parent
Any object to which the BoundRecArr shall be bound to.
attr
The name of the attribute as which it appears in parent.
"""
arr = np.asarray(input_array).view(cls)
arr._parent = parent
arr._attr = attr
return arr
def __array_finalize__(self, obj: Any):
if obj is None: return
self._parent = getattr(obj, '_parent', None)
self._attr = getattr(obj, '_attr', None)
def __reduce__(self) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
pickled_state = super().__reduce__()
new_state = pickled_state[2] + (self.__dict__, )
return pickled_state[0], pickled_state[1], new_state
def __setstate__(self, state: Sequence[Mapping[str, Any]]):
for k, v in state[-1].items():
self.__setattr__(k, v)
super().__setstate__(state[0:-1])
def copy(self, order='C') -> 'BoundRecArr':
new = super().copy()
new._parent = self._parent
return new
def flipped(self) -> 'BoundRecArr':
new_attr = (self._attr_choices[1] if self._attr == self._attr_choices[0]
else self._attr_choices[0])
return BoundRecArr(self, self._parent, new_attr)
def keys(self) -> Tuple[str, ...]:
return self.dtype.names
def __setitem__(self, key: str, arr: np.ndarray):
if not isinstance(arr, np.ndarray):
raise ValueError(
'Can only assign numpy ndarrays to .{}[{!r}], not objects of class {}'
.format(self._attr, key, type(arr))
)
if arr.ndim == 1:
raise ValueError('Use adata.obs or adata.var for 1-dimensional arrays.')
if self.shape[0] != arr.shape[0]:
raise ValueError(
'Can only assign an array of same length ({}), not of length {}.'
.format(self.shape[0], arr.shape[0])
)
# the following always allocates a new array
# even if the key already exists and dimensions match
# TODO: one could check for this case
# dtype
merged_dtype = []
found_key = False
for descr in self.dtype.descr:
if descr[0] == key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
found_key = True
else:
merged_dtype.append(descr)
if not found_key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
# create new array
new = np.empty(len(self), dtype=merged_dtype)
# fill the array
for name in new.dtype.names:
if name == key:
new[name] = arr
else:
new[name] = self[name]
# make it a BoundRecArr
# TODO: why can we not do this step before filling the array?
new = BoundRecArr(new, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def __delitem__(self, key: str):
"""Delete field with name."""
if key not in self.dtype.names:
raise ValueError(
'Currently, can only delete single names from {}.'
.format(self.dtype.names)
)
new_array = rec_drop_fields(self, key)
new = BoundRecArr(new_array, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def to_df(self) -> pd.DataFrame:
"""Convert to pandas dataframe."""
df = pd.DataFrame(index=
|
RangeIndex(0, self.shape[0], name=None)
|
pandas.core.index.RangeIndex
|
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
Series,
_testing as tm,
concat,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
pytestmark = [
pytest.mark.single,
# pytables https://github.com/PyTables/PyTables/issues/822
pytest.mark.filterwarnings(
"ignore:a closed node found in the registry:UserWarning"
),
]
def test_categorical(setup_path):
with
|
ensure_clean_store(setup_path)
|
pandas.tests.io.pytables.common.ensure_clean_store
|
import csv
import requests
from bs4 import BeautifulSoup
import csv
import pandas as pd
import re
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import datetime
import os
today=datetime.date.today()
urls=[]
urls1=[]
urls2=[]
model=[]
company=[]
specs=[]
country=[]
display_list = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
url="http://www.innjoo.com/Product/phone"
r=requests.get(url)
soup=BeautifulSoup(r.text,'html.parser')
links=soup.find_all('li',attrs={'class':'dropdown dropdown-phone active'})
#print(links)
for i in links:
tt=i.find_all('ul',attrs={'class':'product-series-classify'})
for x in tt:
z=x.find_all('a')
for b in z:
urls1.append('http://www.innjoo.com'+b['href'])
for s in urls1:
r=requests.get(s)
soup=BeautifulSoup(r.text,'html.parser')
link=soup.findAll('a', text = re.compile('Tech Specs'))
for a in link:
urls.append('http://www.innjoo.com'+a['href'])
#print(urls)
for u in urls:
specs.append("NOT AVAILABLE")
heads=[]
dets=[]
company.append("INJOO")
country.append("CHINA")
r=requests.get(u)
soup=BeautifulSoup(r.text,'html.parser')
s=soup.find_all('div',attrs={'class':'wd'})
t=soup.find_all('div',attrs={'class':"techspec-content ltr-style"})
m=soup.find_all('div',attrs={'class':"product-name"})
if s:
k=''
print(u)
extras_links.append(u)
for x in m:
if x.text not in model:
model.append(x.text)
for tk in s:
th=tk.find_all('span')
heads.append(th[0].text.strip(":"))
dets.append(th[1].text)
for i in range(len(heads)):
if 'CPU' in heads[i]:
processor_list.append(dets[i])
#print("____")
if 'Screen Size' in heads[i]:
display_list.append(dets[i])
if 'Memory' in heads[i]:
k=k+dets[i]+" "
if 'Storage ' in heads[i]:
k=k+dets[i]
if 'Capacity' in heads[i]:
battery_list.append(dets[i])
if 'Dimensions' in heads[i]:
thickness_list.append(dets[i])
if 'Camera' in heads[i] or 'Pixel' in heads[i]:
camera_list.append(dets[i])
#print("________")
memory_list.append(k)
if t:
xx=[]
bn=' '
print(u)
if u not in extras_links:
extras_links.append(u)
for x in m:
if x.text not in model:
model.append(x.text)
for k in t :
th=k.find_all('td')
for z in th:
xx.append(z.text)
#print(xx)
for j in xx:
if 'mAh' in j:
battery_list.append(j)
if 'inch' in j:
display_list.append(j)
if 'Front' in j and("Megapixel" in j or 'MP' in j or 'megapixel' in j or 'pixels' in j):
camera_list.append(j)
#print("________")
if 'GB' in j:
bn=bn+j+" "
if 'mm' in j and('*' in j or 'x' in j):
thickness_list.append(j)
if 'GHz' in j and('MT' in j or 'Core' in j or 'core' in j):
processor_list.append(j)
#print("______")
memory_list.append("RAM/ROM:"+bn)
print(len(country))
print(len(company))
print(len(model))
print(len(specs))
print(len(display_list))
print(len(camera_list))
print(len(memory_list))
print(len(battery_list))
print(len(thickness_list))
print(len(processor_list))
print(len(extras_links))
records=[]
for i in range(len(company)):
records.append((country[i], company[i], model[i], specs[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
path='C:\\LavaWebScraper\\BrandWiseFiles\\'
df =
|
pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS'])
|
pandas.DataFrame
|
"""
MBD-LLBorder
"""
from functools import partial
from itertools import combinations
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from ..base import BaseMiner, DiscovererMixin
from ..itemsets.lcm import LCMMax
from ..utils import _check_growth_rate, _check_min_supp, filter_maximal, filter_minimal
def border_diff(U, S):
"""
Given a pair of borders <{∅}, {U}> and <{∅}, {S}>,
``border_diff`` derives another border <L2, {U}>
such that [L2, {U}] = [{∅}, {U}] - [{∅}, {S}]
Parameters
----------
U : set
non empty part from the border to differentiate
S : list of set
non-empty part of a border.
Noted as ``R1`` in the original paper
References
----------
.. [1]
<NAME>
Efficient Mining of Emerging Patterns Discovering
Notes
-----
See ``BORDER-DIFF`` in section 4.1
"""
# assert len(R1) < len(U) # assert we iterate on the smallest ensemble
L2 = [{x} for x in U - S[0]]
for i in range(1, len(S)):
_L2 = [X | {x} for x in U - S[i] for X in L2]
L2 = list(filter_minimal(_L2))
return L2, U
def mbdllborder(isets1, isets2):
"""
References
----------
.. [1]
<NAME>
Efficient Mining of Emerging Patterns Discovering
Notes
-----
main algorithm, as explained in section 4.2
"""
borders = list()
for iset in isets2:
if any((e > iset for e in isets1)):
continue
inter = (iset & e for e in isets1)
R = filter_maximal(inter)
diff = border_diff(iset, R)
borders.append(diff)
return borders
def borders_to_patterns(left, right, min_size=None):
"""
    Operates in a breadth-first manner, outputting all
valid patterns of a given size for each level.
Bigger patterns first.
Parameters
----------
left: list of set
right: set
min_size: int
only accepts patterns with greater or equal size
Returns
-------
pd.Series
"""
min_size = min_size or min(map(len, left))
patterns = list()
for size in range(len(right) - 1, min_size - 1, -1): # bigger patterns first
combs = combinations(right, size)
for pat in combs:
if any((e.issuperset(set(pat)) for e in left)):
continue
patterns.append(pat)
return
|
pd.Series(patterns)
|
pandas.Series
|
import pandas
import numpy
import sys
import unittest
import os
import tempfile
import random
import string
import json
import copy
import warnings
sys.path.append("..")
import nPYc
from generateTestDataset import generateTestDataset
from nPYc.enumerations import VariableType
class test_dataset_synthetic(unittest.TestCase):
"""
Test Dataset object functions with synthetic data
"""
def setUp(self):
        # Load empty object and populate with synthetic data.
# Empty object
self.data = nPYc.Dataset()
validChars = string.ascii_letters + string.digits
# Function to generate random strings:
def randomword(length):
return ''.join(random.choice(validChars) for i in range(length))
# Randomly sized intensity data
self.name = randomword(10)
self.noFeat = numpy.random.randint(3,100)
self.noSamp = numpy.random.randint(3,100)
self.data._intensityData = numpy.random.rand(self.noSamp,self.noFeat)
self.data.sampleMetadata['Sample File Name'] = list(map(str, numpy.linspace(1, self.noSamp, num=self.noSamp, dtype=int)))
self.data.sampleMetadata['Sample Metadata'] = [randomword(10) for x in range(0, self.noSamp)]
self.data.featureMetadata['Feature Name'] = list(map(str, numpy.linspace(1, self.noFeat, num=self.noFeat, dtype=int)))
self.data.featureMetadata['Feature Metadata'] = [randomword(10) for x in range(0, self.noFeat)]
self.data.VariableType = VariableType.Discrete
def test_nofeatures(self):
self.assertEqual(self.data.noFeatures, self.noFeat)
def test_name(self):
self.data.name = self.name
self.assertEqual(self.data.name, self.name)
def test_name_raises(self):
with self.assertRaises(TypeError):
self.data.name = 5
def test_normalisation(self):
from nPYc.utilities import normalisation
with self.subTest(msg='Check initialised with a NullNormaliser'):
self.assertIsInstance(self.data.Normalisation, normalisation.NullNormaliser)
numpy.testing.assert_equal(self.data.intensityData, self.data._intensityData)
with self.subTest(msg='Check swap to TA normaliser'):
self.data.Normalisation = normalisation.TotalAreaNormaliser()
taNormaliser = normalisation.TotalAreaNormaliser()
numpy.testing.assert_array_equal(self.data.intensityData, taNormaliser.normalise(self.data._intensityData))
def test_normalisation_raises(self):
with self.assertRaises(TypeError):
self.data.Normalisation = 'Not a Normaliser'
def test_nosamples(self):
self.assertEqual(self.data.noSamples, self.noSamp)
def test__repr__(self):
pointer = id(self.data)
reprString = str(self.data)
testString = "<%s instance at %s, named %s, with %d samples, %d features>" % (nPYc.Dataset().__class__.__name__, pointer, nPYc.Dataset().__class__.__name__, self.noSamp, self.noFeat)
self.assertEqual(reprString, testString)
def test_initialisemasks(self):
self.data.initialiseMasks()
featureMask = numpy.squeeze(numpy.ones([self.noFeat, 1], dtype=bool))
sampleMask = numpy.squeeze(numpy.ones([self.noSamp, 1], dtype=bool))
with self.subTest(msg='Checking featureMask.'):
numpy.testing.assert_equal(self.data.featureMask, featureMask)
with self.subTest(msg='Checking sampleMask.'):
numpy.testing.assert_equal(self.data.sampleMask, sampleMask)
def test_applymasks(self):
# exclude feature 2, samples 1 and 3
featureMask = numpy.squeeze(numpy.ones([self.noFeat, 1], dtype=bool))
featureMask[1] = False
sampleMask = numpy.squeeze(numpy.ones([self.noSamp, 1], dtype=bool))
sampleMask[[0, 2]] = False
expectedDataset = copy.deepcopy(self.data)
expectedDataset.sampleMetadataExcluded = []
expectedDataset.intensityDataExcluded = []
expectedDataset.featureMetadataExcluded = []
expectedDataset.excludedFlag = []
expectedDataset.sampleMetadataExcluded.append(expectedDataset.sampleMetadata.loc[~sampleMask, :])
expectedDataset.intensityDataExcluded.append(expectedDataset.intensityData[~sampleMask, :])
expectedDataset.featureMetadataExcluded.append(expectedDataset.featureMetadata)
expectedDataset.excludedFlag.append('Samples')
expectedDataset.featureMetadataExcluded.append(expectedDataset.featureMetadata.loc[~featureMask, :])
expectedDataset.intensityDataExcluded.append(expectedDataset.intensityData[sampleMask, :][:, ~featureMask])
expectedDataset.sampleMetadataExcluded.append(expectedDataset.sampleMetadata.loc[sampleMask, :])
expectedDataset.sampleMetadataExcluded[1].reset_index(drop=True, inplace=True)
expectedDataset.excludedFlag.append('Features')
expectedDataset.intensityData = expectedDataset.intensityData[sampleMask, :][:, featureMask]
expectedDataset.sampleMetadata = expectedDataset.sampleMetadata.loc[sampleMask, :]
expectedDataset.sampleMetadata.reset_index(drop=True, inplace=True)
expectedDataset.featureMetadata = expectedDataset.featureMetadata.loc[featureMask, :]
expectedDataset.featureMetadata.reset_index(drop=True, inplace=True)
expectedDataset.initialiseMasks()
maskedDataset = copy.deepcopy(self.data)
maskedDataset.initialiseMasks()
maskedDataset.featureMask[1] = False
maskedDataset.sampleMask[[0, 2]] = False
maskedDataset.applyMasks()
with self.subTest(msg='Checking sampleMetadata'):
|
pandas.testing.assert_frame_equal(maskedDataset.sampleMetadata, expectedDataset.sampleMetadata)
|
pandas.testing.assert_frame_equal
|
from flask import Flask ,request
from flask import render_template
import pandas as pd
import joblib
from io import BytesIO
import base64
import shap
from keras.models import load_model
import numpy as np
import glob
data=pd.read_csv("churn_telec.csv")
data = data[data["Churn"] == "Yes"]
loaded_model = joblib.load("decisiontree")
deep_model=load_model("model.h5")
x_df,x_train=pd.read_csv("x_df.csv"),pd.read_csv("x_train.csv")
x_df_deep,x_train_deep=pd.read_csv("x_df_deep.csv"),pd.read_csv("x_train_deep.csv")
x_train=x_train.drop("customerID",axis=1)
x_train_deep=x_train_deep.drop("customerID",axis=1)
columns=data.columns
values=data.values
explainer = shap.KernelExplainer(loaded_model.predict,x_train)
explainer_deep = shap.KernelExplainer(deep_model.predict,x_train_deep)
def find_file_deep(code):
files=glob.glob("./Templates/deep_learning/*.html", recursive = True)
return ("./Templates/deep_learning\\"+code+"deep.html") in files
def find_file(code):
files=glob.glob("./Templates/machine_learning/*.html", recursive = True)
return ("./Templates/machine_learning\\"+code+".html") in files
def blackbox(explainer, row):
if(find_file(row.customerID.values[0])):
return (row.customerID.values[0]+".html")
else:
shap_values = explainer.shap_values(row.drop("customerID",axis=1),nsamples=100)
shap.initjs()
f=shap.force_plot(explainer.expected_value, shap_values, row.drop("customerID",axis=1))
shap.save_html("./Templates/machine_learning/"+row.customerID.values[0]+".html", f)
return (row.customerID.values[0]+".html")
def blackbox_deep(explainer, row,code):
if(find_file_deep(code)):
return (code+"deep.html")
else:
shap_values = explainer.shap_values(row,nsamples=100)
shap.initjs()
f=shap.force_plot(explainer.expected_value, shap_values[0], row)
shap.save_html("./Templates/deep_learning/"+code+"deep.html", f)
return (code+"deep.html")
app=Flask(__name__)
@app.route('/')
def index():
return render_template('home.html',file_ds="explainability_global.html")
@app.route('/user',methods=["GET" , "POST"])
def function():
id=request.args.get('id')
row=(
|
pd.DataFrame(x_df[x_df["customerID"]==id].iloc[-1])
|
pandas.DataFrame
|
"""
Decrease the memory usage of the values and labels in the csv files.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gc # garbage collector
import psutil
def convert_obj_columns_to_cat(df, exclude_cols):
"""
Convert the datatype of object columns to category columns.
:param df: Dataframe of the data
:type df: pandas.core.frame.DataFrame
:param exclude_cols: set of columns to exclude from conversion
:type exclude_cols: set
:returns: dataframe
:rtype: pandas.core.frame.DataFrame
"""
column_list = df.select_dtypes(include=['object']).columns
column_list = [col for col in column_list if col not in exclude_cols]
for col in column_list:
print("converting", col.ljust(30),
"size: ", round(df[col].memory_usage(deep=True)*1e-6, 2), end="\t")
df[col] = df[col].astype("category")
print("->\t", round(df[col].memory_usage(deep=True)*1e-6, 2))
return df
def downcast_df_int_columns(df):
"""
Change integer types to decrease memory usage.
:param df: Dataframe of the data
:type df: pandas.core.frame.DataFrame
:returns: dataframe
:rtype: pandas.core.frame.DataFrame
"""
list_of_columns = list(df.select_dtypes(
include=["int32", "int64"]).columns)
if len(list_of_columns) >= 1:
# finds max string length for better status printing
max_string_length = max([len(col) for col in list_of_columns])
print("\ndowncasting integers for:", list_of_columns, "\n")
for col in list_of_columns:
print("reduced memory usage for: ", col.ljust(max_string_length+2)[:max_string_length+2],
"from", str(round(df[col].memory_usage(deep=True)*1e-6, 2)).rjust(8), "to", end=" ")
df[col] = pd.to_numeric(df[col], downcast="integer")
print(str(round(df[col].memory_usage(deep=True)*1e-6, 2)).rjust(8))
else:
print("no columns to downcast")
gc.collect()
return df
def compress_labels(df):
"""
Decrease memory size of labels
:param df: Dataframe of the data
:type df: pandas.core.frame.DataFrame
:returns: dataframe
:rtype: pandas.core.frame.DataFrame
"""
df['status_group'] = df['status_group'].astype("category")
df['id'] =
|
pd.to_numeric(df['id'], downcast="integer")
|
pandas.to_numeric
|
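# A small sketch, on made-up data, of the two memory tricks used above:
# pd.to_numeric(..., downcast="integer") and .astype("category").
import pandas as pd
demo = pd.DataFrame({"id": range(1000), "status_group": ["functional", "non functional"] * 500})
before = demo.memory_usage(deep=True).sum()
demo["id"] = pd.to_numeric(demo["id"], downcast="integer")  # int64 -> int16 for this value range
demo["status_group"] = demo["status_group"].astype("category")  # strings -> category codes
after = demo.memory_usage(deep=True).sum()
print(round(before * 1e-6, 3), "MB ->", round(after * 1e-6, 3), "MB")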
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected =
|
TimedeltaIndex(rng5 ** 2)
|
pandas.TimedeltaIndex
|
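# A short sketch of what the truncated test above asserts: multiplying a
# TimedeltaIndex elementwise by an equal-length integer array (integers are
# interpreted as nanoseconds, mirroring the test's own construction).
import numpy as np
import pandas as pd
rng5 = np.arange(5, dtype="int64")
idx = pd.TimedeltaIndex(rng5)  # 0ns, 1ns, 2ns, 3ns, 4ns
result = idx * rng5  # elementwise product
expected = pd.TimedeltaIndex(rng5 ** 2)  # 0ns, 1ns, 4ns, 9ns, 16ns
assert (result == expected).all()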
import streamlit as st
import pandas as pd
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import NearestNeighbors
import random
import missingno as msno
import ppscore as pps
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
import base64
from io import BytesIO
pd.set_option('display.max_columns', 500)
@st.cache
def readcsv(csv):
df=pd.read_csv(csv)
return df
def load_data():
DATA_URL = ('https://raw.githubusercontent.com/guireis1/Codenation-Final-Project/master/estaticos_portfolio1.csv')
data = pd.read_csv(DATA_URL)
return data
def head(dataframe):
if len(dataframe) > 1000:
lenght = 1000
else:
lenght = len(dataframe)
slider = st.slider('Linhas exibidas:', 10, lenght)
st.dataframe(dataframe.head(slider))
def vis(data):
for i in data.columns:
if i == 'setor':
sns.set(style="whitegrid")
plt.figure(figsize=(20,10))
sns.countplot(x="setor", data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem dos Setores',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
if i == 'natureza_juridica_macro':
sns.set(style="whitegrid")
plt.figure(figsize=(20,10))
sns.countplot(x="natureza_juridica_macro", data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem da natureza jurídica',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
if i == 'de_faixa_faturamento_estimado_grupo':
sns.set(style="whitegrid")
plt.figure(figsize=(20,20))
sns.countplot(y="de_faixa_faturamento_estimado_grupo",hue='setor', data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem do faturamento por setor',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
if i == 'nm_meso_regiao':
sns.set(style="whitegrid")
plt.figure(figsize=(20,20))
sns.countplot(y="nm_meso_regiao", data=data, palette="Reds_r",saturation=0.5)
plt.title('Contagem Meso Região',fontsize=20)
plt.xlabel('')
plt.ylabel('')
st.pyplot()
@st.cache
def descritiva(dataframe):
desc = dataframe.describe().T
desc['column'] = desc.index
exploratory =
|
pd.DataFrame()
|
pandas.DataFrame
|
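# A tiny sketch, with made-up columns, of the pattern `descritiva` above starts:
# a transposed describe() table that keeps the column name as a regular column.
import pandas as pd
demo = pd.DataFrame({"revenue": [1.5, 3.0, 10.2], "employees": [2, 15, 40]})
desc = demo.describe().T
desc["column"] = desc.index
print(desc[["column", "mean", "std", "min", "max"]])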
from Gray import read_image
import pandas as pd
import matplotlib.pyplot as plt
# Original image
image,x,y=read_image('g.png')
pixels=[]
for i in range(x):
for j in range(y):
pixel=image.getpixel((i,j))
pixels.append(pixel)
pixels=pd.Series(pixels)
proportitionDict=dict(pixels.value_counts(normalize=True))
hist_dict={}
add=0
for i in range(256):
if i in proportitionDict.keys():
add+=proportitionDict[i]
hist_dict[i]=add
table=[]
for i in range(256):
table.append(hist_dict[i]*255)
plt.subplot(221)
plt.hist(pixels,bins=256)
# Target image
image,x,y=read_image('g2.png')
pixels=[]
for i in range(x):
for j in range(y):
pixel=image.getpixel((i,j))
pixels.append(pixel)
pixels=
|
pd.Series(pixels)
|
pandas.Series
|
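# A compact sketch of the cumulative-histogram mapping built above, using a plain
# list of fake pixel values instead of reading an image.
import pandas as pd
pixels_demo = pd.Series([0, 0, 1, 1, 1, 3, 255])
proportion = pixels_demo.value_counts(normalize=True).sort_index()
hist = proportion.reindex(range(256), fill_value=0.0)  # per-level proportions
cdf = hist.cumsum()  # cumulative distribution, same role as the running `add` above
table = (cdf * 255).round().astype(int).tolist()  # equalization lookup table
print(table[0], table[1], table[255])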
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__license__ = "MIT"
import pandas as pd
import yaml
from collections import OrderedDict
def import_series(filename, series_label='precipitation', index_label='datetime', csv_reader_args=None):
"""
Args:
filename:
series_label:
index_label:
csv_reader_args: for example: sep="," or "." and decimal=";" or ","
Returns:
pandas.Series: precipitation series
"""
if filename.endswith('csv'):
if csv_reader_args is None:
csv_reader_args = dict(sep=';', decimal=',')
try:
ts = pd.read_csv(filename, index_col=0, header=0, squeeze=True, **csv_reader_args)
ts.index = pd.to_datetime(ts.index)
ts.index.name = index_label
ts.name = series_label
except Exception:
raise UserWarning('ERROR | '
'Something is wrong with your csv format. The file should only include two columns. '
'First column is the date and time index (prefered format is "YYYY-MM-DD HH:MM:SS") '
'and second column the precipitation values in mm. '
'As a separator use "{sep}" and as decimal sign use "{decimal}".'.format(**csv_reader_args))
return ts
elif filename.endswith('parquet'):
return pd.read_parquet(filename, columns=[series_label])[series_label].rename_axis(index_label, axis='index')
elif filename.endswith('pkl'):
return
|
pd.read_pickle(filename)
|
pandas.read_pickle
|
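# A hedged usage sketch for `import_series` above; the file name is made up and the
# reader arguments mirror the function's own csv defaults (sep=';', decimal=',').
ts = import_series("rain_gauge.csv", series_label="precipitation", index_label="datetime", csv_reader_args=dict(sep=";", decimal=","))
print(ts.head())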
import numpy as np
import pandas as pd
import seaborn as sns
from mpl_toolkits import mplot3d
from pathlib import Path
###############################################################################
#Non-Standard Imports
###############################################################################
import dunlin._utils_plot as upp
###############################################################################
#Raw Data
###############################################################################
class TimeResponseData:
consolidated_colors = {}
###########################################################################
#Instantiation
###########################################################################
def __init__(self, data, base_colors=None, palette_type='light_palette', roll=2,
thin=2, truncate=None, levels=None, drop_scenarios=None, consolidate_colors=True,
):
def _2dict(df):
return {i: g.droplevel(axis=1, level=0) for i, g in df.groupby(axis=1, level=0)}
data = self.preprocess(data, roll, thin, truncate, levels, drop_scenarios)
colors = self.make_colors(data, base_colors, palette_type)
self.colors = colors
self._data = data
self._dct = _2dict(data)
self._t = pd.DataFrame(dict.fromkeys(colors, data.index), index=data.index)
###########################################################################
#Supporting Methods
###########################################################################
@classmethod
def make_colors(cls, df, base_colors=None, palette_type='light_palette'):
levels = list(range(df.columns.nlevels))[1:]
scenarios = sorted([i for i, g in df.groupby(axis=1, level=levels)])
if palette_type == 'light_palette':
colors = upp.make_light_scenarios(scenarios, base_colors)
elif palette_type == 'dark_palette':
colors = upp.make_dark_scenarios(scenarios, base_colors)
else:
colors = upp.make_color_scenarios(scenarios, base_colors)
return colors
@staticmethod
def preprocess(df, roll=2, thin=2, truncate=None, levels=None, drop_scenarios=None):
if levels:
to_drop = [lvl for lvl in df.columns.names if lvl not in levels]
df = df.droplevel(to_drop, axis=1)
df = df.reorder_levels(levels, axis=1)
if truncate:
lb, ub = truncate
df = df.loc[lb:ub]
if drop_scenarios:
lvls = df.columns.names[1:]
temp = [g for i, g in df.groupby(level=lvls, axis=1) if i not in drop_scenarios]
df =
|
pd.concat(temp, axis=1, sort=False)
|
pandas.concat
|
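# A small sketch, on a made-up frame, of the drop_scenarios step above: group the
# column MultiIndex by its non-leading levels, keep the wanted groups, re-concat.
# Note: DataFrame.groupby(axis=1) is deprecated in recent pandas; the source above
# appears to target a version where it is still available.
import pandas as pd
cols = pd.MultiIndex.from_product([["x0", "x1"], ["wt", "mut"], ["r1"]], names=["state", "strain", "rep"])
demo = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=cols)
drop_scenarios = [("mut", "r1")]
lvls = demo.columns.names[1:]  # ['strain', 'rep']
temp = [g for i, g in demo.groupby(level=list(lvls), axis=1) if i not in drop_scenarios]
demo = pd.concat(temp, axis=1, sort=False)
print(demo.columns.tolist())  # [('x0', 'wt', 'r1'), ('x1', 'wt', 'r1')]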
""" Wrapper to invoke retinanet training scripts from openem """
import subprocess
import os
import csv
import pandas as pd
import cv2
import numpy as np
from collections import namedtuple
from pprint import pprint
from openem_train.util import utils
from openem_train.util.roi_transform import RoiTransform
from openem_train.util.img_augmentation import resizeAndFill
import progressbar
FishBoxDetection = namedtuple(
'FishBoxDetection',
['video_id', 'frame', 'x', 'y', 'width', 'height', 'theta', 'class_id'])
def prep(config):
""" Generates a csv file compatible with retinanet training script
outputs it in the OPENEM_WORK area for subsequent commands to use
"""
work_dir = config.work_dir()
retinanet_dir = os.path.join(work_dir, "retinanet")
species_csv = os.path.join(retinanet_dir, "species.csv")
retinanet_csv = os.path.join(retinanet_dir, "totalPopulation.csv")
os.makedirs(retinanet_dir, exist_ok=True)
# Generate the species csv file first
# This is a csv file with each species on a new line, with no
# header
species=[]
for idx,name in enumerate(config.species()):
species.append({'species': name,
'id': idx})
species_df =
|
pd.DataFrame(columns=['species', 'id'], data=species)
|
pandas.DataFrame
|
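# A brief sketch of the species.csv step above, with a made-up species list; as the
# comment in the source notes, the file holds one species per line with no header.
import pandas as pd
species = [{"species": name, "id": idx} for idx, name in enumerate(["fourspot", "grey sole", "plaice"])]
species_df = pd.DataFrame(columns=["species", "id"], data=species)
species_df.to_csv("species_demo.csv", header=False, index=False)
print(species_df)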
import datetime as dt
import gzip
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from watchcbb.scrape.common import get_html
class SportsRefScrape:
"""Class to perform various web-scraping routines from sports-reference.com/cbb"""
def __init__(self):
pass
def get_gid(self, date, t1, t2):
""" Return unique game id, with date and alphabetized teams, like 2020-02-15_indiana_purdue" """
tnames = sorted([t1, t2])
return "{0}_{1}_{2}".format(date,tnames[0],tnames[1])
def get_team_list(self, season=2020):
""" Return a list of all teams in D-I for a given season """
teams_url = f"http://www.sports-reference.com/cbb/seasons/{season}-school-stats.html"
teams_html = get_html(teams_url)
teams_soup = BeautifulSoup(teams_html, "html.parser")
teams = []
table = teams_soup.find("table", id="basic_school_stats").find("tbody")
for td in table.find_all("td", {"data-stat":"school_name"}):
team = td.find("a")["href"].split("/")[3]
teams.append(team)
return teams
def get_game_data(self, season, fout=None, overwrite=False, gids=None, teams=None, startdate=None, enddate=None, verbose=False):
"""Retrieve individual game statistics for a set of teams in a given season
Parameters:
season: year of the season (i.e. 2020 for 2019-20 season)
fout: file to write output CSV to (None to not write to file)
overwrite: True to overwrite file, False to append to it (taking care to avoid duplicates)
gids: optional list of gids to get. If not None, this overrides anything in teams, startdate, enddate
teams: list of team IDs (from sports-reference) to retrieve games for.
If None, use all teams in D-I for the given season
startdate: date to start retrieving games, defaults to beginning of season
enddate: date to end retrieving games, defaults to full season
verbose: print extra info
Returns: list of comma-separated strings, as would be written into the lines of a CSV
"""
if teams is not None:
if gids is not None:
raise Exception("Only one of gids, teams can be non-null")
else:
if gids is None:
teams = self.get_team_list(season)
gids_to_get = None
if gids is not None:
gids_to_get = gids
teams = [gid.split("_")[1] for gid in gids]
teams = list(set(teams))
gids = {}
lines = {}
rows = []
# if we want to update the game file, record everything in the old file
if fout is not None and overwrite==False:
for line in open(fout).readlines()[1:]:
sp = line.strip().split(",")
date = sp[1]
gid = self.get_gid(date,sp[3], sp[5])
if date not in gids.keys():
gids[date] = []
lines[date] = []
lines[date].append(line)
gids[date].append(gid)
stats = ["pts","fg","fga","fg3","fg3a","ft","fta","orb","trb","ast","stl","blk","tov","pf"]
for team in teams:
if verbose:
print("Getting games for "+team+"...")
url = f"http://www.sports-reference.com/cbb/schools/{team}/{season}-gamelogs.html"
html = get_html(url)
soup = BeautifulSoup(html, "html.parser")
# this page only for "game type" (reg season, conf tourney, etc.) If before March, guaranteed Reg Season
if enddate==None or enddate.month >= 2:
url2 = "http://www.sports-reference.com/cbb/schools/{0}/{1}-schedule.html".format(team,season)
html2 = get_html(url2)
soup2 = BeautifulSoup(html2, "html.parser")
table = soup.find("table", id="sgl-basic").find("tbody")
for tr in table.find_all("tr"):
if tr.get("id") == None:
continue
date = tr.find("td", {"data-stat":"date_game"})
if date.find("a") != None:
date = date.find("a").string
else:
continue
opp = tr.find("td", {"data-stat":"opp_id"})
if startdate!=None and startdate > dt.date(*[int(x) for x in date.split("-")]):
continue
if enddate!=None and enddate < dt.date(*[int(x) for x in date.split("-")]):
continue
if opp.find("a")==None:
continue
opp = opp.find("a")["href"].split("/")[3]
gid = self.get_gid(date, team, opp)
if gids_to_get is not None and gid not in gids_to_get:
continue
datem1day = str(dt.date(*[int(x) for x in date.split("-")]) - dt.timedelta(1))
gidm1day = self.get_gid(datem1day, team, opp)
if date not in gids.keys():
gids[date] = []
lines[date] = []
if gid in gids[date] or (datem1day in gids.keys() and gidm1day in gids[datem1day]):
continue
else:
gids[date].append(gid)
if enddate==None or enddate.month >= 2:
gtype = soup2.find("td",{"csk":date}).find_parent("tr").find("td",{"data-stat":"game_type"}).string
else:
gtype = "REG"
if gtype == "REG":
gtype = "RG"
if gtype == "CTOURN":
gtype = "CT"
loc = tr.find("td", {"data-stat":"game_location"}).string
if loc==None: loc="H"
elif loc=="@": loc="A"
elif loc=="N": loc="N"
else:
raise Exception(loc)
numot = tr.find("td", {"data-stat":"game_result"})
if numot.find("small") != None:
numot = int(numot.find("small").string.split("(")[1].split()[0])
else:
numot = 0
statdict = {}
opp_statdict = {}
getint = lambda x: (0 if x is None else int(x))
for stat in stats:
statdict[stat] = getint(tr.find("td",{"data-stat":stat}).string)
opp_statdict[stat] = getint(tr.find("td",{"data-stat":"opp_"+stat}).string)
if statdict["pts"] > opp_statdict["pts"]:
wd, ld = statdict, opp_statdict
wteam, lteam = team, opp
else:
wd, ld = opp_statdict, statdict
wteam, lteam = opp, team
if loc=="H": loc="A"
elif loc=="A": loc="H"
rowvals = [season,date,gtype,wteam,wd["pts"],lteam,ld["pts"],loc,numot,
wd["fg"],wd["fga"],wd["fg3"],wd["fg3a"],wd["ft"],wd["fta"],wd["orb"],
wd["trb"]-wd["orb"],wd["ast"],wd["tov"],wd["stl"],wd["blk"],wd["pf"],
ld["fg"],ld["fga"],ld["fg3"],ld["fg3a"],ld["ft"],ld["fta"],ld["orb"],
ld["trb"]-ld["orb"],ld["ast"],ld["tov"],ld["stl"],ld["blk"],ld["pf"]
]
rows.append(rowvals)
string = ",".join([str(x) for x in rowvals]) + '\n'
lines[date].append(string)
colnames = ["Season","Date","Type","WTeamID","WScore","LTeamID","LScore","WLoc","NumOT",
"WFGM","WFGA","WFGM3","WFGA3","WFTM","WFTA","WOR","WDR","WAst","WTO","WStl",
"WBlk","WPF","LFGM","LFGA","LFGM3","LFGA3","LFTM","LFTA","LOR","LDR","LAst",
"LTO","LStl","LBlk","LPF"
]
if fout:
fout = open(fout, 'w')
fout.write(",".join(colnames)+'\n')
for date in sorted(gids.keys()):
for s in lines[date]:
fout.write(s)
fout.close()
return
|
pd.DataFrame(rows, columns=colnames)
|
pandas.DataFrame
|
#/////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////
# script: generateSummaryTable.py
# author: Lincoln
# date: 3.28.19
#
# Takes as input GOI_out_AA.csv files (from getMutationCounts_overall_and_GOI.py),
# patient metadata, seurat metadata, fusionsDF, and creates a BY CELL
# summaryTable. The goal with this table is to provide an answer to questions like
# 'which patients have which mutations?', and 'how many cells have clinically relevant
# mutations?'. Currently I've got an ipynb that accomplishes this, but it's
# quite long and unwieldy, so I thought converting it to a script would be a good
# idea. Let's try and make this more modular and flowy.
#/////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////
import myLib
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # want to disable this SettingWithCopyWarning
print('running...')
# READ IN ALL OF THESE BY-GENE AMINO-ACID LEVEL MUTATION COUNTS OBJECTS
mutsPATH = '/Users/lincoln.harris/code/SNP_calling_pipeline/getMutationCounts/'
egfrPATH = mutsPATH + 'egfr_germline_out_AA.csv'
brafPATH = mutsPATH + 'braf_germline_out_AA.csv'
krasPATH = mutsPATH + 'kras_germline_out_AA.csv'
egfr_df = pd.read_csv(egfrPATH, header=None, names=['cell', 'mutations'])
braf_df = pd.read_csv(brafPATH, header=None, names=['cell', 'mutations'])
kras_df = pd.read_csv(krasPATH, header=None, names=['cell', 'mutations'])
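# NOTE (illustrative): each *_out_AA.csv is expected to be header-less with two columns
# (cell barcode, mutation list), so a row might look like:
#   A10_1001000329,"['p.L858R']"
# The cell name and mutation shown here are made up for illustration only.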
# FIRST STEP IS TO GENERATE THE mutationsDF
print('creating mutationsDF')
mutationsDF =
|
pd.DataFrame(columns=['cell', 'brafMut', 'egfrMut', 'krasMut'])
|
pandas.DataFrame
|
#!/usr/bin/env python3
import os
import argparse
import pandas as pd
from pathlib import Path
from plumbum import local
from argparse import RawTextHelpFormatter
from plumbum.commands.processes import ProcessExecutionError
from scipy.cluster.hierarchy import ward, complete, average, dendrogram, fcluster, linkage
"""
Run all three python scripts in BioData workflow to generate combined results
Display options provided in KEGG_decoder will be available for final step only
"""
class ArgParse:
def __init__(self, arguments_list, description, *args, **kwargs):
self.arguments_list = arguments_list
self.args = []
# Instantiate ArgumentParser
self.parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description=description,
*args, **kwargs)
# Add all arguments stored in self.arguments_list
self._parse_arguments()
# Parse arguments
try:
self.args = self.parser.parse_args()
except:
exit(1)
def _parse_arguments(self):
""" Protected method for adding all arguments stored in self.arguments_list
Checks value of "require" and sets accordingly
"""
for args in self.arguments_list:
self.parser.add_argument(*args[0], **args[1])
@staticmethod
def description_builder(header_line, help_dict, flag_dict):
""" Static method provides summary of programs/requirements
:param header_line:
:param help_dict:
:param flag_dict:
:return:
"""
assert set(help_dict.keys()) == set(flag_dict.keys()), "Program names do not match in key/help dictionaries"
to_return = header_line + "\n\nAvailable Programs:\n\n"
programs = sorted(flag_dict.keys())
for program in programs:
to_return += program + ": " + help_dict[program] + "\n\t" + \
"\t(Flags: {})".format(" --" + " --".join(flag_dict[program])) + "\n"
to_return += "\n"
return to_return
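# Hedged usage sketch for ArgParse.description_builder (dictionary contents are invented for
# illustration): it returns a help string listing each program, its description, and its flags.
#   ArgParse.description_builder("BioData wrapper",
#                                {"kegg": "KEGG decoder"},
#                                {"kegg": ["input", "output"]})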
def hClust_euclidean(genome_df):
linkage_matrix = linkage(genome_df, method='average', metric='euclidean')
# linkage_matrix = linkage(df, metric='braycurtis')
names = genome_df.index.tolist()
# clust = dendrogram(linkage_matrix, orientation="right", labels=names, get_leaves=True)
clust = dendrogram(linkage_matrix, no_plot=True, labels=names, get_leaves=True)
leaves = clust['ivl']
leave_order = list(leaves)
genome_df = genome_df.reindex(leave_order)
return genome_df
def hClust_correlation(genome_df):
linkage_matrix = linkage(genome_df, method='single', metric='correlation')
# linkage_matrix = linkage(df, metric='braycurtis')
names = genome_df.index.tolist()
# clust = dendrogram(linkage_matrix, orientation="right", labels=names, get_leaves=True)
clust = dendrogram(linkage_matrix, no_plot=True, labels=names, get_leaves=True)
leaves = clust['ivl']
leave_order = list(leaves)
genome_df = genome_df.reindex(leave_order)
return genome_df
def hClust_most_least(genome_df):
sort_dex = genome_df.sum(axis=1).sort_values(ascending=True).index
genome_df = genome_df.loc[sort_dex]
return genome_df
def hClust_least_most(genome_df):
sort_dex = genome_df.sum(axis=1).sort_values(ascending=False).index
genome_df = genome_df.loc[sort_dex]
return genome_df
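# Hedged usage sketch for the hClust_* helpers above (toy frame with invented values):
#   df = pd.DataFrame([[1, 0], [0, 1], [1, 1]], index=["a", "b", "c"], columns=["K1", "K2"])
#   hClust_euclidean(df)   # rows reordered to follow the dendrogram leaf order
#   hClust_most_least(df)  # rows sorted by ascending row sum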
def default_viz(genome_df, outfile_name):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(font_scale=1.2)
sns.set_style({"savefig.dpi": 200})
ax = sns.heatmap(genome_df, cmap=plt.cm.YlOrRd, linewidths=2,
linecolor='k', square=True, xticklabels=True,
yticklabels=True, cbar_kws={"shrink": 0.1})
ax.xaxis.tick_top()
# ax.set_yticklabels(ax.get_yticklabels(), rotation=90)
plt.xticks(rotation=90)
plt.yticks(rotation=0)
# get figure (usually obtained via "fig,ax=plt.subplots()" with matplotlib)
fig = ax.get_figure()
# specify dimensions and save
# xLen = len(genome_df.columns.values.tolist())*20
# yLen = len(genome_df.index.tolist())*20
fig.set_size_inches(100, 100)
fig.savefig(outfile_name, bbox_inches='tight', pad_inches=0.1)
def prefix(_path):
""" Get prefix of file
:param _path: Path, possibly relative
:return:
"""
return os.path.splitext(Path(_path).resolve())[0]
def try_which():
""" Locate hmmsearch on path, if possible
:return:
"""
try:
return str(local["which"]["hmmsearch"]()).rstrip("\r\n")
except ProcessExecutionError:
return "None"
def print_run(cmd):
"""
:param cmd: plumbum local object
:return:
"""
print(cmd)
cmd()
def plotly_viz(genome_df, output_file):
# build heatmap in plotly.offline
Euclidean_genome_df = hClust_euclidean(genome_df)
Correlation_genome_df = hClust_correlation(genome_df)
Most_Least_genome_df = hClust_most_least(genome_df)
Least_Most_genome_df = hClust_least_most(genome_df)
import plotly.graph_objs as go
import plotly.offline as py
xLen = len(genome_df.columns.values.tolist()) * 20
len_genomes = len(genome_df.index.tolist())
if len_genomes >= 200:
yLen = len_genomes * 40
menL = 1.05
elif len_genomes >= 100:
yLen = len_genomes * 30
menL = 1.2
elif len_genomes >= 50:
yLen = len_genomes * 20
menL = 1.5
elif len_genomes >= 25:
yLen = len_genomes * 10
menL = 2.0
else:
yLen = 750
menL = 3.0
colorscale = [
[0, '#f1eef6'],
[0.2, '#f1eef6'],
[0.2, '#bdc9e1'],
[0.4, '#bdc9e1'],
[0.4, '#74a9cf'],
[0.6, '#74a9cf'],
[0.6, '#2b8cbe'],
[0.8, '#2b8cbe'],
[0.8, '#045a8d'],
[1, '#045a8d']]
colorbar = {'tick0': 0, 'dtick': 0.2, 'lenmode': 'pixels', 'len': 500, 'y': 1}
Euclidean_clust = go.Heatmap(x=Euclidean_genome_df.columns.values.tolist(),
y=Euclidean_genome_df.index.tolist(),
z=Euclidean_genome_df.values.tolist(),
colorscale=colorscale,
colorbar=colorbar,
hovertemplate='Sample: %{y}<br>Function: %{x}<br>Proportion: %{z}<extra></extra>',
xgap=1,
ygap=1)
Correlation_clust = go.Heatmap(x=Correlation_genome_df.columns.values.tolist(),
y=Correlation_genome_df.index.tolist(),
z=Correlation_genome_df.values.tolist(),
colorscale=colorscale,
colorbar=colorbar,
xgap=1,
ygap=1,
hovertemplate='Sample: %{y}<br>Function: %{x}<br>Proportion: %{z}<extra></extra>',
visible=False)
Most_Least_clust = go.Heatmap(x=Most_Least_genome_df.columns.values.tolist(),
y=Most_Least_genome_df.index.tolist(),
z=Most_Least_genome_df.values.tolist(),
colorscale=colorscale,
colorbar=colorbar,
xgap=1,
ygap=1,
hovertemplate='Sample: %{y}<br>Function: %{x}<br>Proportion: %{z}<extra></extra>',
visible=False)
Least_Most_clust = go.Heatmap(x=Least_Most_genome_df.columns.values.tolist(),
y=Least_Most_genome_df.index.tolist(),
z=Least_Most_genome_df.values.tolist(),
colorscale=colorscale,
colorbar=colorbar,
xgap=1,
ygap=1,
hovertemplate='Sample: %{y}<br>Function: %{x}<br>Proportion: %{z}<extra></extra>',
visible=False)
data = [Euclidean_clust, Correlation_clust, Most_Least_clust, Least_Most_clust]
updatemenus = [dict(
buttons=[
dict(label='Euclidean_Clustering', method='update', args=[{'visible': [True, False, False, False]}]),
dict(label='Correlation_Clustering', method='update', args=[{'visible': [False, True, False, False]}]),
dict(label='Most_to_Least', method='update', args=[{'visible': [False, False, True, False]}]),
dict(label='Least_to_Most', method='update', args=[{'visible': [False, False, False, True]}])
],
direction='down',
pad={'r': 10, 't': 10},
showactive=True,
x=0.1,
xanchor='left',
y=menL,
yanchor='top'
)]
layout = go.Layout(xaxis={'side': 'top'},
autosize=False,
width=xLen,
height=yLen,
plot_bgcolor='#000000',
margin=go.layout.Margin(t=500),
updatemenus=updatemenus,
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename=output_file, auto_open=False)
def make_tanglegram(genome_df, newick, output_file, tanglegram_opt):
import pandas as pd
import itertools
from Bio import Phylo
import tanglegram as tg
from scipy.spatial.distance import pdist, squareform
# FORMAT KEGGDECODER OUTPUT
# generate distance matrix for genome_df from pathway values
# genome_df = pd.read_csv(genome_df, index_col=0, sep='\t')
kegg_d = squareform(pdist(genome_df, metric='euclidean'))
kegg_m =
|
pd.DataFrame(kegg_d)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 11:25:03 2019
@author: lealp
"""
import pandas as pd
pd.set_option('display.width', 50000)
|
pd.set_option('display.max_rows', 50000)
|
pandas.set_option
|
from pathlib import Path
from typing import Sequence, Union
import numpy as np
import pandas as pd
import xarray as xr
from scipy.io import loadmat
def load_data_SESL(
sl_fpath: Union[str, Path],
T_fpath: Union[str, Path],
use_cov: bool,
use_Mar_T0: bool,
Mar_fpath: Union[str, Path, None] = None,
T_err_sc: float = 1,
cov_tau: float = 100,
no_neg_cov: bool = True,
baseperiod: Sequence[int] = [1400, 1800],
T0_temp_level: float = 100,
T0_period_st: int = -2000,
) -> xr.Dataset:
"""Load historical temperature and sea level reconstructions
Parameters
----------
sl_fpath : str or :class:`pathlib.Path`
Path to sea level reconstruction input ``.mat`` file.
T_fpath : str or :class:`pathlib.Path`
Path to temperature reconstruction input ``.mat`` file.
use_cov : bool
If True, use covariance matrix of SL reconstruction data (if existing) to
estimate likelihood of parameter set.
use_Mar_T0 : bool
If True, use Marcott long-running temperature reconstruction to calculate ``T0``
value until reconstruction at ``T_fpath`` starts.
Mar_fpath : str or :class:`pathlib.Path` or None, optional
Path to Marcott sea level reconstruction input ``.mat`` file. Only used if
``use_Mar_T0`` is True.
T_err_sc : float, optional
Scaling factor for temperature error uncertainty.
cov_tau : float, optional
Time scale for covariance. If not null, take the elementwise product of the
covariance and a tapering function exp(-delta(t)/cov_tau). Only used if
``use_cov`` is True.
no_neg_cov : bool, optional
Bound covariance matrix to be non-negative. Default True.
baseperiod : array-like, optional
Reference period used for sea level data. Data are normed to have 0 mean over
this period. Default [1400, 1800].
T0_temp_level : int, optional
If ``use_Mar_T0`` is True, number of years over which to harmonize the mean of
the Marcott T time series and time series at ``T_fpath`` in order to calculate
T0 from Marcott.
T0_period_st : int, optional
Starting year of period used to calculate an initial T0(0).
Returns
-------
:class:`xarray.Dataset`
Contains the processed estimated value and error for the temperature
reconstruction at ``T_fpath``, the sea level reconstruction at ``sl_fpath``, and
        the derived T0 timeseries using ``T_fpath`` and (optionally) the long-running
Marcott reconstruction
"""
# load SL proxy data
sl_data = loadmat(sl_fpath, squeeze_me=True)
sl = sl_data["sl"]
proxy_sl = pd.DataFrame(
{
"val": (sl[:, 1] / 10).astype(np.float64),
"err": (sl[:, 2] / 10).astype(np.float64),
},
index=pd.Index(sl[:, 0].astype(np.int16), name="year"),
)
C = (sl_data["C"] / 100).astype(np.float64)
C += np.eye(len(C)) * np.finfo(C.dtype).eps
if use_cov:
if cov_tau is not None:
Csc = np.exp(
-np.abs(
np.expand_dims(proxy_sl.index.values, 0)
- np.expand_dims(proxy_sl.index.values, 1)
)
/ cov_tau
)
C *= Csc
else:
raise NotImplementedError
if no_neg_cov:
C = np.maximum(C, 0)
# rebase proxy SL data to base period
proxy_sl["val"] -= proxy_sl.loc[baseperiod[0] : baseperiod[1], "val"].mean()
# convert to long format
proxy_sl = proxy_sl.stack()
proxy_sl.index = proxy_sl.index.rename("kind", level=-1)
proxy_sl.name = "sl"
# load T reconstruction data
T = loadmat(T_fpath, squeeze_me=True)["T"]
T = pd.DataFrame(
T[:, 1:3],
columns=["val", "err"],
index=pd.Index(T[:, 0], name="year").astype(np.int16),
)
# assert common timestep
dyr = np.diff(T.index)
assert len(np.unique(dyr)) == 1
dyr = dyr[0]
# scale by predefined scaling factor
T["err"] *= T_err_sc
# convert to long format
T_long = T.stack()
T_long.index = T_long.index.rename("kind", level=-1)
T_long.name = "T"
# aggregate into Dataset
data = xr.merge(
(
proxy_sl.to_xarray().rename(year="sl_year"),
T_long.to_xarray().rename(year="T_year"),
)
)
# Use Mar data for early T values if using for initializing T0
if use_Mar_T0:
T_mar = loadmat(Mar_fpath)["T"]
T_mar = pd.DataFrame(
T_mar[:, 1:],
columns=["val", "err"],
index=pd.Index(T_mar[:, 0], name="year").astype(np.int16),
)
T_mar_overlap_mean = T_mar.loc[
T.index.min() : T.index.min() + T0_temp_level, "val"
].mean()
T_overlap_mean = T.loc[: T.index.min() + T0_temp_level, "val"].mean()
T_mar["val"] = T_mar["val"] - T_mar_overlap_mean + T_overlap_mean
T_mar = T_mar.loc[: T.index.min() - int((dyr - 1) / 2)]
T0_temp =
|
pd.concat((T_mar, T))
|
pandas.concat
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 01 10:00:58 2021
@author: <NAME>
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
from math import e
import numpy as np
import pandas as pd
import os
import time
import glob
import itertools
from joblib import Parallel, delayed
from generate_files import GenerateFiles
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
import seaborn as sns
import matplotlib.style as style
style.use('seaborn-poster') #sets the size of the charts
style.use('ggplot')
from scipy import ndimage
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u
from astropy.stats import mad_std
import astrotools.healpytools as hpt
import astropy_healpix as ahp
from astropy.coordinates import ICRS
from tqdm import tqdm
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import healpy as hp
from hpproj import CutSky, to_coord
import logging
cs_logger = logging.getLogger('cutsky')
cs_logger.setLevel(logging.WARNING)
cs_logger.propagate = False
hpproj_logger = logging.getLogger('hpproj')
hpproj_logger.setLevel(logging.WARNING)
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
#------------------------------------------------------------------#
# # # # # Functions # # # # #
#------------------------------------------------------------------#
class MakeData(object):
"""Class to create and preprocess input/output files from full sky-maps.
"""
def __init__(self, dataset, npix, loops, planck_path, milca_path, disk_radius=None, output_path=None):
"""
Args:
dataset (str): file name for the cluster catalog that will used.
Options are 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'.
bands (list): list of full sky-maps that will be used for the input file.
loops (int): number of times the dataset containing patches with at least one cluster within will be added
again to training set with random variations (translations/rotations).
Options are 100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', and 'y-map'.
More full sky-maps will be added later on (e.g. CO2, X-ray, density maps).
planck_path (str): path to directory containing planck HFI 6 frequency maps.
Files should be named as following
'HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits',
                'HFI_SkyMap_217-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits',
'HFI_SkyMap_545-field-Int_2048_R3.00_full.fits', 'HFI_SkyMap_857-field-Int_2048_R3.00_full.fits'.
milca_path (str): path to directory containing MILCA full sky map. File should be named 'milca_ymaps.fits'.
disk_radius (float, optional): Disk radius that will be used to create segmentation masks for output files.
Defaults to None.
            output_path (str, optional): Path to output directory. Output directory needs to be created beforehand using
'python xcluster.py -m True' selecting same output directory in 'params.py'.
If None, xcluster path will be used. Defaults to None.
"""
self.path = os.getcwd() + '/'
self.dataset = dataset # 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'
self.bands = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz','y-map','CO','p-noise']
self.loops = loops
self.n_labels = 2
maps = []
self.freq = 1022
self.planck_freq = 126
if '100GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 100', 'docontour': True}))
# self.freq += 2
# self.planck_freq += 2
if '143GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 143', 'docontour': True}))
# self.freq += 4
# self.planck_freq += 4
if '217GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_217-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 217', 'docontour': True}))
# self.freq += 8
# self.planck_freq += 8
if '353GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 353', 'docontour': True}))
# self.freq += 16
# self.planck_freq += 16
if '545GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_545-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 545', 'docontour': True}))
# self.freq += 32
# self.planck_freq += 32
if '857GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_857-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 857', 'docontour': True}))
# self.freq += 64
# self.planck_freq += 64
if 'y-map' in self.bands:
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True}))
# self.freq += 128
if 'CO' in self.bands:
maps.append((planck_path + "COM_CompMap_CO21-commander_2048_R2.00.fits", {'legend': 'CO', 'docontour': True}))
# self.freq += 256
if 'p-noise' in self.bands:
maps.append((planck_path + 'COM_CompMap_Compton-SZMap-milca-stddev_2048_R2.00.fits', {'legend': 'noise', 'docontour': True}))
# self.freq += 512
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True})) #used for plots only
self.maps = maps
self.temp_path = self.path + 'to_clean/'
self.disk_radius = disk_radius
self.npix = npix #in pixels
self.pixsize = 1.7 #in arcmin
self.ndeg = (self.npix * self.pixsize)/60 #in deg
self.nside = 2
if output_path is None:
self.output_path = self.path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
else:
self.output_path = output_path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
self.dataset_path = self.path + 'datasets/' + self.dataset + '/'
self.planck_path = planck_path
self.milca_path = milca_path
self.test_regions = [[0, 360, 90, 70],
[0, 120, 70, 40], [120, 240, 70, 40], [240, 360, 70, 40],
[0, 120, 40, 18], [120, 240, 40, 18], [240, 360, 40, 18],
[0, 120, -18, -40], [120, 240, -18, -40], [240, 360, -18, -40],
[0, 120, -40, -70], [120, 240, -40, -70], [240, 360, -40, -70],
[0, 360, -70, -90]]
self.val_regions = [[0, 180, -20, -40],
[0, 180, -20, -40], [0, 180, -20, -40], [0, 180, -20, -40],
[0, 360, -40, -60], [0, 360, -40, -60], [0, 360, -40, -60],
[0, 360, 60, 40], [0, 360, 60, 40], [0, 360, 60, 40],
[0, 180, 40, 20], [0, 180, 40, 20], [0, 180, 40, 20],
[0, 180, 40, 20]]
def plot_psz2_clusters(self, healpix_path):
"""Saves plots containing patches for planck frequency maps and y-map.
Function is deprecated and will be removed in later versions.
Args:
healpix_path (str): output path for plots (deprecated).
"""
maps = self.maps
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
glon = PSZ2[1].data['GLON']
glat = PSZ2[1].data['GLAT']
freq = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', 'y-map']
for j in range(len(glon)):
fig = plt.figure(figsize=(21,14), tight_layout=False)
fig.suptitle(r'$glon=$ {:.2f} $^\circ$, $glat=$ {:.2f} $^\circ$'.format(glon[j], glat[j]), y=0.92, fontsize=20)
cutsky = CutSky(maps, npix=self.npix, pixsize=self.pixsize, low_mem=False)
coord = to_coord([glon[j], glat[j]])
result = cutsky.cut_fits(coord)
for i,nu in enumerate(freq):
ax = fig.add_subplot(3,4,1+i)
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
HDU = result[i]['fits']
im = ax.imshow(HDU.data, origin="lower")
w = WCS(HDU.header)
sky = w.world_to_pixel_values(glon[j], glat[j])
segmentation = plt.Circle((sky[0], sky[1]), 2.5/1.7, color='white', alpha=0.1)
ax.add_patch(segmentation)
ax.axvline(sky[0], ymin=0, ymax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axvline(sky[0], ymin=(self.npix//2+10)/self.npix, ymax=1, color='white', linestyle='--')
ax.axhline(sky[1], xmin=0, xmax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axhline(sky[1], xmin=(self.npix//2+10)/self.npix, xmax=1, color='white', linestyle='--')
# ax.scatter(sky[0], sky[1], color='red')
ax.set_title(r'%s'%nu)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.savefig(healpix_path + 'PSZ2/PSZ2_skycut_%s.png'%j, bbox_inches='tight', transparent=False)
plt.show()
plt.close()
def create_catalogs(self, plot=False):
"""Creates the following catalogs using 'PSZ2v1.fits', 'MCXC-Xray-clusters.fits', and 'redmapper_dr8_public_v6.3_catalog.fits'
(see <NAME> 2018 for more details):
planck_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with known redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC', 'Z'
planck_no_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with unknown redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC'
MCXC_no_planck (pd.DataFrame): dataframe with the following columns for MCXC clusters:
'RA', 'DEC', 'R500', 'M500', 'Z'
RM50_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>50:
'RA', 'DEC', 'LAMBDA', 'Z'
RM30_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>30:
'RA', 'DEC', 'LAMBDA', 'Z'
Catalogs are saved in output_path + /catalogs/. Input catalogs are in planck_path.
Args:
            plot (bool, optional): If True, will save plots of the distribution of distances between duplicates. Defaults to False.
"""
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
df_psz2 = pd.DataFrame(data={'RA': PSZ2[1].data['RA'].tolist(), 'DEC': PSZ2[1].data['DEC'].tolist(), 'GLON': PSZ2[1].data['GLON'].tolist(), 'GLAT':PSZ2[1].data['GLAT'].tolist(),
'M500': PSZ2[1].data['MSZ'].tolist(), 'R500': PSZ2[1].data['Y5R500'].tolist(), 'REDMAPPER': PSZ2[1].data['REDMAPPER'].tolist(), 'MCXC': PSZ2[1].data['MCXC'].tolist(),
'Z': PSZ2[1].data['REDSHIFT'].tolist()})
df_psz2 = df_psz2.replace([-1, -10, -99], np.nan)
planck_no_z = df_psz2.query('Z.isnull()', engine='python')
planck_z = df_psz2.query('Z.notnull()', engine='python')
# planck_no_z = planck_no_z[['RA', 'DEC']].copy()
# planck_z = planck_z[['RA', 'DEC']].copy()
planck_no_z.to_csv(self.path + 'catalogs/planck_no-z' + '.csv', index=False)
planck_z.to_csv(self.path + 'catalogs/planck_z' + '.csv', index=False)
MCXC = fits.open(self.planck_path + 'MCXC-Xray-clusters.fits')
MCXC_skycoord = SkyCoord(ra=MCXC[1].data['RA'].tolist(), dec=MCXC[1].data['DEC'].tolist(), unit=u.degree)
MCXC_GLON = list(MCXC_skycoord.galactic.l.degree)
MCXC_GLAT = list(MCXC_skycoord.galactic.b.degree)
df_MCXC = pd.DataFrame(data={'RA': MCXC[1].data['RA'].tolist(), 'DEC': MCXC[1].data['DEC'].tolist(), 'R500': MCXC[1].data['RADIUS_500'].tolist(), 'M500': MCXC[1].data['MASS_500'].tolist(),
'GLON': MCXC_GLON, 'GLAT': MCXC_GLAT, 'Z': MCXC[1].data['REDSHIFT'].tolist()})
REDMAPPER = fits.open(self.planck_path + 'redmapper_dr8_public_v6.3_catalog.fits')
REDMAPPER_skycoord = SkyCoord(ra=REDMAPPER[1].data['RA'].tolist(), dec=REDMAPPER[1].data['DEC'].tolist(), unit=u.degree)
REDMAPPER_GLON = list(REDMAPPER_skycoord.galactic.l.degree)
REDMAPPER_GLAT = list(REDMAPPER_skycoord.galactic.b.degree)
df_REDMAPPER = pd.DataFrame(data={'RA': REDMAPPER[1].data['RA'].tolist(), 'DEC': REDMAPPER[1].data['DEC'].tolist(), 'LAMBDA': REDMAPPER[1].data['LAMBDA'].tolist(),
'GLON': REDMAPPER_GLON, 'GLAT': REDMAPPER_GLAT, 'Z': REDMAPPER[1].data['Z_SPEC'].tolist()})
df_REDMAPPER_30 = df_REDMAPPER.query("LAMBDA > 30")
df_REDMAPPER_50 = df_REDMAPPER.query("LAMBDA > 50")
ACT = fits.open(self.planck_path + 'sptecs_catalog_oct919_forSZDB.fits')
SPT = fits.open(self.planck_path + 'DR5_cluster-catalog_v1.1_forSZDB.fits')
df_act = pd.DataFrame(data={'RA': list(ACT[1].data['RA']), 'DEC': list(ACT[1].data['DEC']), 'GLON': list(ACT[1].data['GLON']), 'GLAT': list(ACT[1].data['GLAT'])})
df_spt = pd.DataFrame(data={'RA': list(SPT[1].data['RA']), 'DEC': list(SPT[1].data['DEC']), 'GLON': list(SPT[1].data['GLON']), 'GLAT': list(SPT[1].data['GLAT'])})
self.remove_duplicates_on_radec(df_MCXC, df_psz2, output_name='MCXC_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_30, df_psz2, output_name='RM30_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_50, df_psz2, output_name='RM50_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_act, df_psz2, output_name='ACT_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_spt, df_psz2, output_name='SPT_no_planck', plot=plot)
PSZ2.close()
MCXC.close()
REDMAPPER.close()
ACT.close()
SPT.close()
def create_fake_source_catalog(self):
PGCC = fits.open(self.planck_path + 'HFI_PCCS_GCC_R2.02.fits')
df_pgcc = pd.DataFrame(data={'RA': list(PGCC[1].data['RA']), 'DEC': list(PGCC[1].data['DEC']), 'GLON': list(PGCC[1].data['GLON']), 'GLAT': list(PGCC[1].data['GLAT'])})
PGCC.close()
df_pgcc.to_csv(self.path + 'catalogs/' + 'PGCC' + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
bands = ['100GHz', '143GHz', '217GHz', '353GHz', '545GHz', '857GHz']
cs_100 = fits.open(self.planck_path + 'COM_PCCS_100_R2.01.fits')
cs_143 = fits.open(self.planck_path + 'COM_PCCS_143_R2.01.fits')
cs_217 = fits.open(self.planck_path + 'COM_PCCS_217_R2.01.fits')
cs_353 = fits.open(self.planck_path + 'COM_PCCS_353_R2.01.fits')
cs_545 = fits.open(self.planck_path + 'COM_PCCS_545_R2.01.fits')
cs_857 = fits.open(self.planck_path + 'COM_PCCS_857_R2.01.fits')
df_cs_100 = pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})
df_cs_100.to_csv(self.path + 'catalogs/' + 'cs_100' + '.csv', index=False)
df_cs_143 = pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})
df_cs_143.to_csv(self.path + 'catalogs/' + 'cs_143' + '.csv', index=False)
df_cs_217 = pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})
df_cs_217.to_csv(self.path + 'catalogs/' + 'cs_217' + '.csv', index=False)
df_cs_353 = pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})
df_cs_353.to_csv(self.path + 'catalogs/' + 'cs_353' + '.csv', index=False)
df_cs_545 = pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})
df_cs_545.to_csv(self.path + 'catalogs/' + 'cs_545' + '.csv', index=False)
df_cs_857 = pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})
df_cs_857.to_csv(self.path + 'catalogs/' + 'cs_857' + '.csv', index=False)
freq = 0
if '100GHz' in bands:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in bands:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in bands:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in bands:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in bands:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in bands:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
for L in range(1, len(bands)):
for subset in tqdm(itertools.combinations(bands, L)):
freq = 0
if '100GHz' in subset:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in subset:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in subset:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in subset:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in subset:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in subset:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
cs_100.close()
cs_143.close()
cs_217.close()
cs_353.close()
cs_545.close()
cs_857.close()
def remove_duplicates_on_radec(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=5, plot=False):
""""Takes two different dataframes with columns 'RA' & 'DEC' and performs a spatial
coordinate match with a tol=5 arcmin tolerance. Saves a .csv file containing df_main
without objects in common from df_with_dup.
Args:
df_main (pd.DataFrame): main dataframe.
df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 5.
            plot (bool, optional): If True, will save plots of the distribution of distances between duplicates. Defaults to False.
"""
if with_itself == True:
scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_main.query("ismatched == False", inplace=True)
df_main.drop(columns=['ismatched', 'ID'], inplace=True)
df_main = df_main.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
elif with_itself == False:
assert df_with_dup is not None
ID = np.arange(0, len(df_with_dup))
df_with_dup = df_with_dup[['RA', 'DEC']].copy()
df_with_dup.insert(loc=0, value=ID, column='ID')
scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
pcatalog_sub = SkyCoord(ra=df_with_dup['RA'].values, dec=df_with_dup['DEC'].values, unit='deg')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_with_dup.drop(columns=['RA', 'DEC'], inplace=True)
df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1)
df_wo_dup.query("ismatched == False", inplace=True)
df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True)
df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
df_main = df_wo_dup.copy()
if plot == True and output_name is not None:
fig = plt.figure(figsize=(8,8), tight_layout=False)
ax = fig.add_subplot(111)
ax.set_facecolor('white')
ax.grid(True, color='grey', lw=0.5)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20)
ax.set_ylabel(output_name, fontsize=20)
ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400)
ax.axvline(tol, color='k', linestyle='--')
ax.set_xlim(0, 2*tol)
plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False)
plt.show()
plt.close()
return df_main
def remove_duplicates_on_lonlat(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=2, plot=False):
""""Takes two different dataframes with columns 'GLON' & 'GLAT' and performs a spatial
coordinate match with a tol=2 arcmin tolerance. Saves a .csv file containing df_main
without objects in common from df_with_dup.
Args:
df_main (pd.DataFrame): main dataframe.
output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 2.
            plot (bool, optional): If True, will save plots of the distribution of distances between duplicates. Defaults to False.
"""
if with_itself == True:
scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_main.query("ismatched == False", inplace=True)
df_main.drop(columns=['ismatched', 'ID'], inplace=True)
df_main = df_main.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
elif with_itself == False:
assert df_with_dup is not None
ID = np.arange(0, len(df_with_dup))
df_with_dup = df_with_dup[['GLON', 'GLAT']].copy()
df_with_dup.insert(loc=0, value=ID, column='ID')
scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic')
pcatalog_sub = SkyCoord(df_with_dup['GLON'].values, df_with_dup['GLAT'].values, unit='deg', frame='galactic')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_with_dup.drop(columns=['GLON', 'GLAT'], inplace=True)
df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1)
df_wo_dup.query("ismatched == False", inplace=True)
df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True)
df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
df_main = df_wo_dup.copy()
if plot == True and output_name is not None:
fig = plt.figure(figsize=(8,8), tight_layout=False)
ax = fig.add_subplot(111)
ax.set_facecolor('white')
ax.grid(True, color='grey', lw=0.5)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20)
ax.set_ylabel(output_name, fontsize=20)
ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400)
ax.axvline(tol, color='k', linestyle='--')
ax.set_xlim(0, 2*tol)
plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False)
plt.show()
plt.close()
return df_main
def create_circular_mask(self, h, w, center, ang_center, radius):
"""Takes a list of center positions and returns a segmentation mask with circulat masks at the center's
position.
Args:
h (int): patch height.
w (int): patch width.
center (list of tuples): In pixels. List of tupples containing center coordinates to mask.
ang_center (list of tuples): In ICRS. Same as center
radius ([type]): In arcmin. Disk radius for mask
Returns:
np.ndarray: ndarray with shape (h,w) filled with zeros except at centers position where circular masks
with size radius are equal to one.
"""
if radius is None:
size_distribution = fits.open(self.path + 'catalogs/exp_joined_ami_carma_plck_psz1_psz2_act_spt_YT.fits')[1].data['T500']
heights, bins = np.histogram(size_distribution, bins=8, density=False, range=[0,15])
heights = heights/sum(heights)
bins = bins[1:]
radius = np.random.choice(bins, p=heights)/self.pixsize
else:
radius = radius/self.pixsize
Y, X = np.ogrid[:h, :w]
mask = np.zeros((h,w))
count = 0
ra, dec = [], []
for i,c in enumerate(center):
if np.isnan(c[0]):
continue
elif np.isnan(c[1]):
continue
else:
dist_from_center = np.sqrt((X - int(c[0]))**2 + (Y - int(c[1]))**2)
mask += (dist_from_center <= radius).astype(int)
is_all_zero = np.all(((dist_from_center <= radius).astype(int) == 0))
if is_all_zero == False:
count += 1
ra.append(ang_center[i][0])
dec.append(ang_center[i][1])
return np.where(mask > 1, 1, mask), count, ra, dec
def return_coord_catalog(self):
"""
Returns coordinate catalogs
Returns:
DataFrame: cluster coordinate catalog
DataFrame: other sources coordinate catalog
"""
if self.dataset == 'planck_z':
planck_z =
|
pd.read_csv(self.path + 'catalogs/planck_z' + '.csv')
|
pandas.read_csv
|
from os.path import join, exists, dirname, basename
from os import makedirs
import sys
import pandas as pd
from glob import glob
import seaborn as sns
import numpy as np
from scipy import stats
import xlsxwriter
import matplotlib.pyplot as plt
from scripts.parse_samplesheet import get_min_coverage, get_role, add_aliassamples, get_species
from scripts.snupy import check_snupy_status
import json
import datetime
import getpass
import socket
import requests
from requests.auth import HTTPBasicAuth
import urllib3
import yaml
import pickle
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
plt.switch_backend('Agg')
RESULT_NOT_PRESENT = -5
def report_undertermined_filesizes(fp_filesizes, fp_output, fp_error,
zscorethreshold=1):
# read all data
fps_sizes = glob(join(dirname(fp_filesizes), '*.txt'))
pds_sizes = []
for fp_size in fps_sizes:
data = pd.read_csv(
fp_size, sep="\t", names=["filesize", "filename", "status"],
index_col=1)
# mark given read as isme=True while all other data in the dir
# are isme=False
data['isme'] = fp_filesizes in fp_size
data['filesize'] /= 1024**3
pds_sizes.append(data)
pd_sizes = pd.concat(pds_sizes)
# compute z-score against non-bad known runs
pd_sizes['z-score'] = np.nan
idx_nonbad = pd_sizes[pd_sizes['status'] != 'bad'].index
pd_sizes.loc[idx_nonbad, 'z-score'] = stats.zscore(
pd_sizes.loc[idx_nonbad, 'filesize'])
# plot figure
fig = plt.figure()
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] != 'bad')]['filesize'],
kde=False, rug=False, color="black", label='known runs')
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] == 'bad')]['filesize'],
kde=False, rug=False, color="red", label='bad runs')
ax = sns.distplot(
pd_sizes[pd_sizes['isme'] == np.True_]['filesize'],
kde=False, rug=True, color="green", label='this run')
_ = ax.set_ylabel('number of files')
_ = ax.set_xlabel('file-size in GB')
ax.set_title('run %s' % basename(fp_filesizes)[:-4])
ax.legend()
# raise error if current run contains surprisingly large undetermined
# filesize
if pd_sizes[(pd_sizes['isme'] == np.True_) &
(pd_sizes['status'] == 'unknown')]['z-score'].max() > zscorethreshold:
ax.set_title('ERROR: %s' % ax.get_title())
fig.savefig(fp_error, bbox_inches='tight')
raise ValueError(
("Compared to known historic runs, your run contains surprisingly "
"(z-score > %f) large file(s) of undetermined reads. You will find"
" an supporting image at '%s'. Please do the following things:\n"
"1. discuss with lab personal about the quality of the run.\n"
"2. should you decide to keep going with this run, mark file "
"status (3rd column) in file '%s' as 'good'.\n"
"3. for future automatic considerations, mark file status (3rd "
"column) as 'bad' if you have decided to abort processing due to"
" too low quality (z-score kind of averages about known values)."
) % (zscorethreshold, fp_error, fp_filesizes))
else:
fig.savefig(fp_output, bbox_inches='tight')
def report_exome_coverage(
fps_sample, fp_plot,
min_coverage=30, min_targets=80, coverage_cutoff=200):
"""Creates an exome coverage plot for multiple samples.
Parameters
----------
fps_sample : [str]
A list of file-paths with coverage data in csv format.
fp_plot : str
Filepath of output graph.
min_coverage : int
Default: 30.
        An arbitrary threshold of minimal coverage that we expect.
A vertical dashed line is drawn at this value.
min_targets : float
Default: 80.
        An arbitrary threshold of minimal targets that we expect to be covered.
A horizontal dashed line is drawn at this value.
coverage_cutoff : float
Default: 200.
Rightmost coverage cut-off value where X-axis is limited.
Raises
------
ValueError : If one of the sample's coverage falls below expected
thresholds.
"""
# Usually we aim for a 30X coverage on 80% of the sites.
fig, ax = plt.subplots()
ax.axhline(y=min_targets, xmin=0, xmax=coverage_cutoff, color='gray',
linestyle='--')
ax.axvline(x=min_coverage, ymin=0, ymax=100, color='gray', linestyle='--')
samples_below_coverage_threshold = []
for fp_sample in fps_sample:
coverage = pd.read_csv(fp_sample, sep="\t")
samplename = fp_sample.split('/')[-1].split('.')[0]
linewidth = 1
if coverage[coverage['#coverage'] == min_coverage]['percent_cumulative'].min() < min_targets:
linewidth = 4
samples_below_coverage_threshold.append(samplename)
ax.plot(coverage['#coverage'],
coverage['percent_cumulative'],
label=samplename,
linewidth=linewidth)
ax.set_xlim((0, coverage_cutoff))
ax.set_xlabel('Read Coverage')
ax.set_ylabel('Targeted Exome Bases')
ax.legend()
if len(samples_below_coverage_threshold) > 0:
fp_plot = fp_plot.replace('.pdf', '.error.pdf')
fig.savefig(fp_plot, bbox_inches='tight')
if len(samples_below_coverage_threshold) > 0:
raise ValueError(
"The following %i sample(s) have coverage below expected "
"thresholds. Please discuss with project PIs on how to proceed. "
"Maybe, samples need to be re-sequenced.\n\t%s\nYou will find more"
" information in the generated coverage plot '%s'." % (
len(samples_below_coverage_threshold),
'\n\t'.join(samples_below_coverage_threshold),
fp_plot))
ACTION_PROGRAMS = [
{'action': 'background',
'program': 'GATK',
'fileending_snupy_extract': '.snp_indel.gatk',
'fileending_spike_calls': '.gatk.snp_indel.vcf',
'stepname_spike_calls': 'gatk_CombineVariants',
},
{'action': 'background',
'program': 'Platypus',
'fileending_snupy_extract': '.indel.ptp',
'fileending_spike_calls': '.ptp.annotated.filtered.indels.vcf',
'stepname_spike_calls': 'platypus_filtered',
},
{'action': 'tumornormal',
'program': 'Varscan',
'fileending_snupy_extract': '.somatic.varscan',
'fileending_spike_calls':
{'homo sapiens': '.snp.somatic_germline.vcf',
'mus musculus': '.indel_snp.vcf'},
'stepname_spike_calls': 'merge_somatic',
},
{'action': 'tumornormal',
'program': 'Mutect',
'fileending_snupy_extract': '.somatic.mutect',
'fileending_spike_calls': '.all_calls.vcf',
'stepname_spike_calls': 'mutect',
},
{'action': 'tumornormal',
'program': 'Excavator2',
'fileending_snupy_extract': '.somatic.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_somatic',
},
{'action': 'trio',
'program': 'Varscan\ndenovo',
'fileending_snupy_extract': '.denovo.varscan',
'fileending_spike_calls': '.var2denovo.vcf',
'stepname_spike_calls': 'writing_headers',
},
{'action': 'trio',
'program': 'Excavator2',
'fileending_snupy_extract': '.trio.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_trio',
},
]
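# Each entry above pairs a calling "action" (background / tumornormal / trio) with a program and the
# file endings used to locate its snupy uploads ('fileending_snupy_extract') and its on-disk call
# files ('fileending_spike_calls' / 'stepname_spike_calls'); see the _get_statusdata_* helpers below.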
def _get_statusdata_demultiplex(samplesheets, prefix, config):
demux_yields = []
for flowcell in samplesheets['run'].unique():
fp_yielddata = '%s%s%s/Data/%s.yield_data.csv' % (prefix, config['dirs']['intermediate'], config['stepnames']['yield_report'], flowcell)
if exists(fp_yielddata):
demux_yields.append(
pd.read_csv(fp_yielddata, sep="\t").rename(columns={'Project': 'Sample_Project', 'Sample': 'Sample_ID', 'Yield': 'yield'})) #.set_index(['Project', 'Lane', 'Sample', 'Barcode sequence'])
if len(demux_yields) <= 0:
return pd.DataFrame()
demux_yields = add_aliassamples(pd.concat(demux_yields, axis=0), config)
    # map yields of original samples to aliases
for idx, row in demux_yields[demux_yields['is_alias'] == True].iterrows():
orig = demux_yields[(demux_yields['Sample_Project'] == row['fastq-prefix'].split('/')[0]) & (demux_yields['Sample_ID'] == row['fastq-prefix'].split('/')[1])]['yield']
if orig.shape[0] > 0:
demux_yields.loc[idx, 'yield'] = orig.sum()
demux_yields = demux_yields.dropna(subset=['yield'])
return pd.DataFrame(demux_yields).groupby(['Sample_Project', 'Sample_ID'])['yield'].sum()
def _get_statusdata_coverage(samplesheets, prefix, config, min_targets=80):
coverages = []
for (sample_project, sample_id), meta in samplesheets.groupby(['Sample_Project', 'Sample_ID']):
role_sample_project, role_sample_id = sample_project, sample_id
if (meta['is_alias'] == True).any():
role_sample_project, role_sample_id = get_role(sample_project, meta['spike_entity_id'].unique()[0], meta['spike_entity_role'].unique()[0], samplesheets).split('/')
fp_coverage = join(prefix, config['dirs']['intermediate'], config['stepnames']['exome_coverage'], role_sample_project, '%s.exome_coverage.csv' % role_sample_id)
if exists(fp_coverage):
coverage = pd.read_csv(fp_coverage, sep="\t")
if coverage.shape[0] > 0:
coverages.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'coverage': coverage.loc[coverage['percent_cumulative'].apply(lambda x: abs(x-min_targets)).idxmin(), '#coverage']})
if len(coverages) <= 0:
return pd.DataFrame()
return pd.DataFrame(coverages).set_index(['Sample_Project', 'Sample_ID'])['coverage']
def _isKnownDuo(sample_project, spike_entity_id, config):
"""Checks if trio is a known duo, i.e. missing samples won't be available in the future.
Parameters
----------
sample_project : str
spike_entity_id : str
config : dict()
Snakemake configuration.
Returns
-------
Boolean: True, if spike_entity_id is in config list of known duos for given project.
False, otherwise.
"""
if 'projects' in config:
if sample_project in config['projects']:
if 'known_duos' in config['projects'][sample_project]:
if spike_entity_id in config['projects'][sample_project]['known_duos']:
return True
return False
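# Hedged sketch of the config layout _isKnownDuo expects (project and entity names are invented):
#   config = {'projects': {'SomeProject': {'known_duos': ['entity_042']}}}
#   _isKnownDuo('SomeProject', 'entity_042', config)  # -> True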
def _get_statusdata_snupyextracted(samplesheets, prefix, snupy_instance, config):
results = []
for sample_project, meta in samplesheets.groupby('Sample_Project'):
        # project in config file is not properly configured for snupy!
if config['projects'].get(sample_project, None) is None:
continue
if config['projects'][sample_project].get('snupy', None) is None:
continue
if config['projects'][sample_project]['snupy'][snupy_instance].get('project_id', None) is None:
continue
r = requests.get('%s/experiments/%s.json' % (config['credentials']['snupy'][snupy_instance]['host'], config['projects'][sample_project]['snupy'][snupy_instance]['project_id']),
auth=HTTPBasicAuth(config['credentials']['snupy'][snupy_instance]['username'], config['credentials']['snupy'][snupy_instance]['password']),
verify=False)
check_snupy_status(r)
samples = [sample['name'] for sample in r.json()['samples']]
for sample_id, meta_sample in meta.groupby('Sample_ID'):
for file_ending, action, program in [(ap['fileending_snupy_extract'], ap['action'], ap['program']) for ap in ACTION_PROGRAMS]:
# in some cases "sample name" hold spike_entity_id, in others Sample_ID
entity = sample_id
runs = '+'.join(sorted(meta_sample['run'].unique()))
if (action == 'trio'):
if meta_sample['spike_entity_role'].unique()[0] == 'patient':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
if (action == 'tumornormal'):
if meta_sample['spike_entity_role'].unique()[0] == 'tumor':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
name = '%s_%s/%s%s' % (runs, sample_project, entity, file_ending)
if (sample_project in config['projects']) and (pd.notnull(meta_sample['spike_entity_role'].iloc[0])):
if ((action == 'trio') and (meta_sample['spike_entity_role'].iloc[0] in ['patient', 'sibling']) and (not _isKnownDuo(sample_project, meta_sample['spike_entity_id'].iloc[0], config))) or\
((action == 'background')) or\
((action == 'tumornormal') and (meta_sample['spike_entity_role'].iloc[0].startswith('tumor'))):
results.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'action': action,
'program': program,
'status': name in samples,
'snupy_sample_name': name
})
if len(results) <= 0:
return pd.DataFrame()
return pd.DataFrame(results).set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])
def _get_statusdata_numberpassingcalls(samplesheets, prefix, config, RESULT_NOT_PRESENT, verbose=sys.stderr):
results = []
    # leave out sample aliases
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] != True].fillna('not defined').groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
def _get_fileending(file_ending, fastq_prefix, samplesheets, config):
if isinstance(file_ending, dict):
return file_ending[get_species(fastq_prefix, samplesheets, config)]
else:
return file_ending
for ap in ACTION_PROGRAMS:
fp_vcf = None
if (ap['action'] == 'background') and pd.notnull(spike_entity_role):
if (ap['program'] == 'GATK'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Platypus'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'tumornormal'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].apply(lambda x: x.split('_')[0] if pd.notnull(x) else x).isin(['tumor']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID']):
# for Keimbahn, the tumor sample needs to include the name of the original sample ID
instance_id = '%s/%s' % (alias_sample_project, alias_sample_id)
if alias_spike_entity_role == 'tumor':
# for Maus_Hauer, the filename holds the entity name, but not the Sample ID
instance_id = '%s/%s' % (sample_project, spike_entity_id)
if (alias_spike_entity_role.split('_')[0] in set(['tumor'])):
if (ap['program'] == 'Varscan'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Mutect'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'trio'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id, alias_spike_entity_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].isin(['patient', 'sibling']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID', 'spike_entity_id']):
# Trios are a more complicated case, since by default the result name is given by the
# spike_entity_id, but if computed for siblings, the name is given by the fastq-prefix
if (ap['program'] == 'Varscan\ndenovo'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
# remove entry, if it is known (config.yaml) that this trio is incomplete
            if (spike_entity_role == 'patient') and (spike_entity_id in config.get('projects', {}).get(sample_project, {}).get('known_duos', [])):
fp_vcf = None
results.append({
'Sample_Project': sample_project,
'Sample_ID': fastq_prefix.split('/')[-1],
'action': ap['action'],
'program': ap['program'],
'fp_calls': fp_vcf,
})
status = 0
num_status = 20
if verbose is not None:
print('of %i: ' % num_status, file=verbose, end="")
for i, res in enumerate(results):
if (verbose is not None) and int(i % (len(results) / num_status)) == 0:
status+=1
print('%i ' % status, file=verbose, end="")
nr_calls = RESULT_NOT_PRESENT
if (res['fp_calls'] is not None) and exists(res['fp_calls']):
try:
if res['program'] == 'Varscan':
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[7], squeeze=True).apply(lambda x: ';SS=2;' in x).sum()
else:
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[6], squeeze=True).value_counts()['PASS']
except pd.io.common.EmptyDataError:
nr_calls = 0
res['number_calls'] = nr_calls
if verbose is not None:
print('done.', file=verbose)
if len(results) <= 0:
return pd.DataFrame()
results = pd.DataFrame(results)
results = results[pd.notnull(results['fp_calls'])].set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])['number_calls']
# add alias sample results
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] == True].groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
for (_, _, action, program), row in results.loc[fastq_prefix.split('/')[0], fastq_prefix.split('/')[-1], :].iteritems():
results.loc[sample_project, meta['Sample_ID'].unique()[0], action, program] = row
# remove samples, that don't have their own role, but were used for aliases
for (sample_project, sample_id), _ in samplesheets[pd.isnull(samplesheets['spike_entity_role'])].groupby(['Sample_Project', 'Sample_ID']):
idx_to_drop = results.loc[sample_project, sample_id, ['tumornormal', 'trio'], :].index
if len(idx_to_drop) > 0:
results.drop(index=idx_to_drop, inplace=True)
return results
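# Hedged sketch (not part of the original pipeline): a stand-alone version of the
# call-counting logic used in _get_statusdata_numberpassingcalls above. Column 6 is
# the VCF FILTER field and column 7 the INFO field, matching the usecols indices
# above; the helper name and the vcf path you pass in are illustrative only.
def _example_count_passing_calls(fp_vcf, program='GATK'):
    if program == 'Varscan':
        # Varscan somatic calls are flagged with ";SS=2;" in the INFO column
        info = pd.read_csv(fp_vcf, comment='#', sep="\t", dtype=str, header=None, usecols=[7]).iloc[:, 0]
        return int(info.apply(lambda x: ';SS=2;' in x).sum())
    # other callers: count records whose FILTER column equals "PASS"
    filters = pd.read_csv(fp_vcf, comment='#', sep="\t", dtype=str, header=None, usecols=[6]).iloc[:, 0]
    return int((filters == 'PASS').sum())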
def _get_genepanel_data(samplesheets, prefix, config):
results = []
columns = ['Sample_Project', 'Sample_ID', 'genepanel', 'gene']
    # leave out sample aliases
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] != True].fillna('not defined').groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
#print(sample_project, spike_entity_id, spike_entity_role, fastq_prefix)
for file in glob('%s%s%s/*/%s.tsv' % (prefix, config['dirs']['intermediate'], config['stepnames']['genepanel_coverage'], fastq_prefix)):
#print("\t", file)
coverage = pd.read_csv(file, sep="\t")
parts = file.split('/')
# determine genepanel name, project and sample_id from filename
coverage['Sample_Project'] = sample_project
coverage['Sample_ID'] = meta['Sample_ID'].unique()[0]
coverage['genepanel'] = parts[-3][:-5]
coverage = coverage.set_index(columns)
results.append(coverage)
if len(results) > 0:
results = pd.concat(results).sort_values(by=columns)
else:
results =
|
pd.DataFrame(columns=columns)
|
pandas.DataFrame
|
from numpy.testing import assert_equal, assert_allclose
from pandas.testing import assert_frame_equal
import numpy as np
import pandas as pd
from sciparse import find_lcr_dataline, parse_lcr_header, parse_lcr
from sciparse import convert_lcr_to_standard
import pytest
import os
@pytest.fixture
def filename():
dir_name = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dir_name, 'data/lcr_test_data.dat')
filename = str(filename)
return filename
@pytest.fixture
def metadata(filename):
metadata = parse_lcr_header(filename)
return metadata
@pytest.fixture
def data(filename):
data, metadata = parse_lcr(filename)
return data
def test_extract_header(metadata):
desiredMode = "SWEEP"
actualMode = metadata['mode']
assert_equal(actualMode, desiredMode)
desiredStartVoltage = 10
actualStartVoltage = metadata['start_voltage']
    assert_equal(actualStartVoltage, desiredStartVoltage, err_msg="start voltage")
desiredStopVoltage = -20
actualStopVoltage = metadata['stop_voltage']
    assert_equal(actualStopVoltage, desiredStopVoltage, err_msg="stop voltage")
desiredStepVoltage = -0.25
actualStepVoltage = metadata['step_voltage']
assert_equal(actualStepVoltage, desiredStepVoltage, err_msg="step voltage")
desiredPoints = 121
actualPoints = metadata['n_samples']
assert_equal(actualPoints, desiredPoints, err_msg="number points")
def test_find_datalines(filename):
desiredStartLine = 28
actualStartLine = find_lcr_dataline(filename)
assert_equal(actualStartLine, desiredStartLine)
def test_parse_data_header(data):
# Confirm we got the right data types
actualDataTypes = data.columns.values
desiredDataTypes = ['Z', 'THETA', 'BIAS', 'VM', 'IM']
assert_equal(actualDataTypes, desiredDataTypes)
def test_parse_data_length(data):
# Confirm we got the right length of data
desired_data_points = 121
actual_data_points = len(data)
assert_equal(actual_data_points, desired_data_points)
def test_parse_data(data):
desiredZData = 5.57723*1e6
actualZData = data['Z'].iloc[1]
assert_allclose(desiredZData, actualZData)
desiredBIASData = 8.5
actualBIASData = data['BIAS'].iloc[6]
assert_allclose(desiredBIASData, actualBIASData)
def test_convert_data_CP_RP():
frequency = 1 / (2*np.pi) * 1000 # 1krad/s
test_metadata = {'frequency': frequency}
test_data = pd.DataFrame({'CP': [1e-9], 'RP': 1e6})
desired_data = pd.DataFrame({
'Z (ohm)': [1 / np.sqrt(2) * 1e6],
'THETA (rad)': [-np.pi/4]})
actual_data = convert_lcr_to_standard(test_data, test_metadata)
assert_frame_equal(actual_data, desired_data)
def test_convert_data_CS_RS():
frequency = 1 / (2*np.pi) * 1000 # 1krad/s
test_metadata = {'frequency': frequency}
test_data = pd.DataFrame({'CS': [1e-9], 'RS': 1e6})
desired_data = pd.DataFrame({
'Z (ohm)': [np.sqrt(2) * 1e6],
'THETA (rad)': [-np.pi/4]})
actual_data = convert_lcr_to_standard(test_data, test_metadata)
|
assert_frame_equal(actual_data, desired_data)
|
pandas.testing.assert_frame_equal
|
"""
Utils to plot graphs with arrows
"""
import matplotlib.transforms
import matplotlib.patches
import matplotlib.colors
import matplotlib.cm
import numpy as np
import pandas as pd
import logging
from tctx.util import plot
def _clip_arrows(arrows, tail_offset, head_offset):
"""
shorten head & tail so the arrows don't overlap with markers
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:return: 2 numpy arrays of shape Nx2
"""
source_pos = arrows[['source_x', 'source_y']].values
target_pos = arrows[['target_x', 'target_y']].values
direction = target_pos - source_pos
length = np.sqrt(np.sum(np.square(direction), axis=1))
direction = direction / length[:, np.newaxis]
source_pos = source_pos + direction * tail_offset
target_pos = target_pos + direction * (-1 * head_offset)
return source_pos, target_pos
def plot_arrows_cmap(
ax, arrows, c, cmap=None, norm=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a colormap.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param c: a pd.Series with the same index as arrows or a string that identifies a column in it.
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:param kwargs: args for matplotlib.patches.FancyArrowPatch
:return: matplotlib.cm.Mappable that can be used for a colorbar
:param cmap:
:param norm:
:param head_length:
:param head_width:
:return:
"""
if cmap is None:
cmap = 'default'
if isinstance(cmap, str):
cmap = plot.lookup_cmap(cmap)
if isinstance(c, str):
c = arrows[c]
if norm is None:
norm = matplotlib.colors.Normalize(vmin=c.min(), vmax=c.max())
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
color = cmap(norm(c[idx]))
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
sm = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(c.values)
return sm
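# Hedged usage sketch (assumed data, not taken from the original code base): build a
# tiny arrows table, colour the arrows by a value column and attach a colorbar from
# the returned ScalarMappable. The 'delay_ms' column name is made up for the example.
def _example_plot_arrows_cmap():
    import matplotlib.pyplot as plt
    arrows = pd.DataFrame({
        'source_x': [0., 1.], 'source_y': [0., 0.],
        'target_x': [1., 2.], 'target_y': [1., 0.],
        'delay_ms': [5., 12.],
    })
    fig, ax = plt.subplots()
    sm = plot_arrows_cmap(ax, arrows, 'delay_ms', cmap=plt.get_cmap('viridis'), head_offset=.05)
    fig.colorbar(sm, ax=ax, label='delay_ms')
    # FancyArrowPatch artists do not trigger autoscaling, so set limits explicitly
    ax.set_xlim(-0.5, 2.5)
    ax.set_ylim(-0.5, 1.5)
    return fig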
def _plot_single_arrow(ax, source_pos, target_pos, arrowstyle, color, **kwargs):
patch_kwargs = kwargs.copy()
patch_kwargs.setdefault('edgecolor', color)
patch_kwargs.setdefault('facecolor', color)
patch = matplotlib.patches.FancyArrowPatch(
posA=source_pos,
posB=target_pos,
arrowstyle=arrowstyle,
**patch_kwargs,
)
ax.add_artist(patch)
def plot_arrows_solid(
ax, arrows, color=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a solid color.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:param kwargs: args for matplotlib.patches.FancyArrowPatch
:param color:
:param head_length:
:param head_width:
:param kwargs:
:return:
"""
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
class Graph:
"""
A class to plot graphs with per-node and per-edge styles
"""
def __init__(self, nodes, edges, styles=None, transform=None, kwargs_nodes=None, kwargs_edges=None):
"""
:param nodes: a pd.DataFrame with columns ['x', 'y'] representing the 2d position and
column 'style' that can be indexed into the styles DF
:param edges: a pd.DataFrame with columns ['source', 'target'] that can be indexed into the nodes DF and
column 'style' that can be indexed into the styles DF
:param styles: pd.DataFrame with columns for different cmaps ('cmap_from_white', etc),
color levels ('light', 'dark', etc). By default: plot.styles_df
:param kwargs_nodes: default kwargs to nodes plotting
:param kwargs_edges: default kwargs to edges plotting
:param transform: the transform to apply to the graph. Useful when drawing an inset.
"""
assert np.all(edges['source'] != edges['target']), 'self edges'
assert np.all([np.issubdtype(nodes[c].dtype, np.number) for c in ['x', 'y']])
if styles is None:
styles = plot.styles_df.copy()
self.styles = styles
self.nodes = nodes
self.edges = edges
self.transform = transform
self.default_kwargs_nodes = dict(
cmap='cmap',
marker='marker_time',
linewidth=.5,
facecolor='light',
edgecolor='darker',
)
self.default_kwargs_nodes.update(kwargs_nodes or {})
self.default_kwargs_edges = dict(
cmap='cmap',
facecolor='main',
edgecolor='main',
)
self.default_kwargs_edges.update(kwargs_edges or {})
edge_len = self.get_edge_lengths()
too_short = np.count_nonzero(np.isclose(edge_len, 0))
if too_short:
logging.warning(f'{too_short}/{len(edge_len)} edges of zero length')
# pandas complains when editing categories which is inconvenient
if self.nodes['style'].dtype.name == 'category':
self.nodes['style'] = self.nodes['style'].astype(str)
if self.edges['style'].dtype.name == 'category':
self.edges['style'] = self.edges['style'].astype(str)
def copy(self):
return Graph(
nodes=self.nodes.copy(),
edges=self.edges.copy(),
styles=self.styles.copy(),
transform=None if self.transform is None else self.transform.copy(),
kwargs_nodes=self.default_kwargs_nodes.copy(),
kwargs_edges=self.default_kwargs_edges.copy(),
)
def get_edge_lengths(self):
xy0 = self.nodes.loc[self.edges['source'], ['x', 'y']].values
xy1 = self.nodes.loc[self.edges['target'], ['x', 'y']].values
edge_len = np.sqrt(np.sum(np.square(xy0 - xy1), axis=1))
return pd.Series(edge_len, index=self.edges.index)
def _get_arrows(self, selection=None):
if selection is None:
selection = self.edges
if isinstance(selection, (np.ndarray, pd.Index)):
selection = self.edges.loc[selection]
arrows = [selection]
for end in ['source', 'target']:
pos = self.nodes[['x', 'y']].reindex(selection[end])
pos.index = selection.index
pos.columns = [end + '_' + c for c in pos.columns]
arrows.append(pos)
arrows = pd.concat(arrows, axis=1)
return arrows
def _lookup_style_kwargs(self, style, kwargs):
kwargs = kwargs.copy()
if 'style' in kwargs:
specific = kwargs.pop('style')
if style in specific:
kwargs.update(specific[style])
styled_kwargs = kwargs.copy()
for k, v in kwargs.items():
if isinstance(v, str) and v in self.styles.columns:
styled_kwargs[k] = self.styles.loc[style, v]
if self.transform is not None:
styled_kwargs['transform'] = self.transform
return styled_kwargs
def plot_nodes_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the nodes with a flat color
:param ax:
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return:
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
ax.scatter(
nodes.x,
nodes.y,
**style_kwargs,
)
def plot_nodes_cmap(self, ax, c=None, selection=None, **kwargs):
"""
Plot all of the nodes with a color map
:param ax:
:param c: series or array matching length of self.nodes,
if none indicated, we expect a column 'c' in self.nodes
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return: a dict of style to mappable for use in colorbars
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
if c is None:
c = 'c'
if isinstance(c, str):
c = self.nodes[c]
if isinstance(c, np.ndarray):
c = pd.Series(c, index=self.nodes.index)
all_sm = {}
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
all_sm[style] = ax.scatter(
nodes.x,
nodes.y,
c=c.loc[nodes.index],
**style_kwargs,
)
return all_sm
def plot_nodes_labels(self, ax, nodes=None, va='center', ha='center', fmt='{index}', fontsize=6, **kwargs):
"""
plot a descriptive text for each node.
By default, the index is show, modify fmt to use something else
"""
# TODO allow the style column in the fmt to color by dark of the "label" column.
if nodes is None:
nodes = self.nodes
else:
nodes = self.nodes.loc[nodes]
for idx, row in nodes.iterrows():
ax.text(row['x'], row['y'], fmt.format(index=idx, **row), va=va, ha=ha, fontsize=fontsize, **kwargs)
def plot_edges_cmap(self, ax, c=None, **kwargs):
"""
Plot all of the nodes with a color map
:param ax:
:param c: series or array matching length of self.edges,
if none indicated, we expect a column 'c' in self.edges
:param kwargs: params to plot_arrows_cmap
:return: a dict of style to mappable for use in colorbars
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
if c is None:
c = self.edges['c']
all_sm = {}
for style, arrows in self._get_arrows().groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
if 'edgecolor' in style_kwargs:
style_kwargs.pop('edgecolor')
all_sm[style] = plot_arrows_cmap(
ax, arrows, c,
**style_kwargs
)
return all_sm
def plot_edges_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the edges with a flat color
:param ax:
:param selection:
:param kwargs:
:return:
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
for style, arrows in self._get_arrows(selection=selection).groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
plot_arrows_solid(
ax, arrows,
**style_kwargs
)
@classmethod
def from_conns(cls, conns, cells, node_style='ei_type', edge_style='con_type'):
"""plot the connections in XY space"""
all_gids = np.unique(conns[['source_gid', 'target_gid']].values.flatten())
nodes = cells.loc[all_gids, ['x', 'y']].copy()
nodes['style'] = cells.loc[nodes.index, node_style]
edges = conns[['source_gid', 'target_gid']].copy()
edges.columns = ['source', 'target']
edges['style'] = conns.loc[edges.index, edge_style]
return cls(nodes, edges)
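    # Hedged usage sketch (column names follow the docstring above; data is assumed):
    #   cells: DataFrame indexed by gid with 'x', 'y' and an 'ei_type' column
    #   conns: DataFrame with 'source_gid', 'target_gid' and a 'con_type' column
    #
    #   g = Graph.from_conns(conns, cells)
    #   fig, ax = plt.subplots()
    #   g.plot_nodes_solid(ax)
    #   g.plot_edges_solid(ax)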
@classmethod
def from_conn_jumps(
cls, selected_jumps, detailed_spikes, node_keys, edge_style,
**kwargs):
"""plot spike jumps"""
assert 'x' in node_keys and 'y' in node_keys and 'style' in node_keys
nodes = {}
for k, v in node_keys.items():
if isinstance(v, str):
v = detailed_spikes[v]
else:
assert isinstance(v, (tuple, list, pd.Series, np.ndarray))
nodes[k] = v
nodes = pd.DataFrame(nodes)
edges = selected_jumps[['source_spike', 'target_spike']].copy()
edges.columns = ['source', 'target']
edges['style'] = selected_jumps.loc[edges.index, edge_style]
return cls(nodes, edges, **kwargs)
def get_floating_nodes(self) -> pd.Index:
"""
:return: the index of nodes with no connections in or out
"""
return self.nodes.index[
~self.nodes.index.isin(self.edges['source']) &
~self.nodes.index.isin(self.edges['target'])
]
def get_linked_nodes(self) -> pd.Index:
"""
:return: the index of nodes with at least a connection in or out
"""
return self.nodes.index[~self.nodes.index.isin(self.get_floating_nodes())]
def drop_nodes(self, drop_gids: pd.Index):
"""
remove the given nodes from the graph. This will also remove edges to/from those nodes
:param drop_gids: either a list of node ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.nodes.index, fill_value=False)
assert len(drop_gids) == len(self.nodes)
drop_gids = self.nodes.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.nodes.index.difference(drop_gids)
self.nodes = self.nodes.loc[remaining_gids].copy()
bad_edges = (
self.edges['source'].isin(drop_gids) |
self.edges['target'].isin(drop_gids)
)
self.edges = self.edges.loc[~bad_edges].copy()
def drop_edges(self, drop_gids: pd.Index):
"""
remove the given edges from the graph
example:
graph.drop_edges(graph.edges['weight'] < .75 * 70)
:param drop_gids: either a list of edge ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.edges.index, fill_value=False)
assert len(drop_gids) == len(self.edges)
drop_gids = self.edges.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.edges.index.difference(drop_gids)
self.edges = self.edges.loc[remaining_gids].copy()
def add_edges(self, new_edges: pd.DataFrame, **overwrite_cols):
"""
Add edges to this graph.
Inplace.
:param overwrite_cols: pairs of <column, value> to assign to new_edges before adding them.
For example, to set a style.
"""
new_edges = new_edges.copy()
for c, v in overwrite_cols.items():
new_edges[c] = v
missing_cols = self.edges.columns.difference(new_edges.columns)
if len(missing_cols) > 0:
logging.error(f'Missing columns: {list(missing_cols)}. Got: {list(new_edges.columns)}')
return
repeated = self.edges.index.intersection(new_edges.index)
if len(repeated):
logging.warning(f'Repeated edges will be ignored: {repeated}')
new_edges = new_edges.drop(repeated)
valid = (
new_edges['source'].isin(self.nodes.index) &
new_edges['target'].isin(self.nodes.index)
)
if np.any(~valid):
logging.warning(f'{np.count_nonzero(~valid):,g} edges without source or target will be ignored')
new_edges = new_edges[valid]
all_edges = pd.concat([self.edges, new_edges], axis=0, sort=False)
assert all_edges.index.is_unique
self.edges = all_edges
def add_nodes(self, new_nodes: pd.DataFrame, **overwrite_cols):
"""
Add edges to this graph.
Inplace.
:param overwrite_cols: pairs of <column, value> to assign to new_nodes before adding them.
For example, to set a style.
"""
new_nodes = new_nodes.copy()
for c, v in overwrite_cols.items():
new_nodes[c] = v
missing_cols = self.nodes.columns.difference(new_nodes.columns)
if len(missing_cols) > 0:
logging.warning(f'Missing columns: {list(missing_cols)}. Got: {list(new_nodes.columns)}')
repeated = self.nodes.index.intersection(new_nodes.index)
if len(repeated):
logging.warning(f'Repeated nodes will be ignored: {repeated}')
new_nodes = new_nodes.drop(repeated)
all_nodes = pd.concat([self.nodes, new_nodes], axis=0, sort=False)
assert all_nodes.index.is_unique
self.nodes = all_nodes
def add_graph(self, other):
"""
Add another graph to this one.
Inplace.
"""
self.add_nodes(other.nodes)
self.add_edges(other.edges)
def drop_edges_orphan(self):
"""remove edges without a known source or target"""
mask_edges = (
self.edges['source'].isin(self.nodes.index) &
self.edges['target'].isin(self.nodes.index)
)
self.edges = self.edges[mask_edges].copy()
def layout_spring(self, edges_idx=None, iterations=100, source_gid=None, **kwargs):
"""
modify inplace the XY positions of the graph using a spring force algorithm
if source_gid is provided, it will be fixed at coordinate (0, 0)
initial position are taken from the current XY.
"""
fixed = kwargs.pop('fixed', None)
if source_gid is not None:
if fixed is None:
fixed = {}
fixed[source_gid] = (0, 0)
from networkx import spring_layout
pos = spring_layout(
self._get_as_networkx_digraph(edges_idx),
pos={i: (x, y) for i, x, y in self.nodes[['x', 'y']].itertuples()},
fixed=fixed,
iterations=iterations,
**kwargs,
)
self._set_node_xy(pd.DataFrame.from_dict(pos, orient='index', columns=['x', 'y']))
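    # Hedged usage sketch: refine an existing layout with a spring pass while pinning
    # a (hypothetical) source node at the origin, e.g.
    #   g.layout_graphviz()
    #   g.layout_spring(iterations=200, source_gid=some_source_gid)
    # Initial positions are read from the current 'x'/'y' columns, so a coarse layout
    # can be relaxed without losing its overall shape.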
def layout_graphviz(self, edges_idx=None, **kwargs):
"""
modify inplace the XY positions of the graph using a one of the graphviz algorithms
see https://stackoverflow.com/questions/21978487/improving-python-networkx-graph-layout
"""
from networkx.drawing.nx_agraph import graphviz_layout
pos = graphviz_layout(
self._get_as_networkx_digraph(edges_idx),
**kwargs)
self._set_node_xy(pd.DataFrame.from_dict(pos, orient='index', columns=['x', 'y']))
def layout_raster_graphviz(self, all_spikes):
"""
modify inplace the Y positions of the graph (preserving X)
using the 'dot' algorithm (hierarchical)
"""
oldx = self.nodes['x'].copy()
self.layout_graphviz(prog='dot')
self.layout_transpose()
self.layout_reflect('x')
# restore x as time
self.nodes['x'] = oldx
# force y to be different and unique per gid
        self.nodes['y'] = self.nodes['y'].astype(float)
gids = all_spikes.loc[self.nodes.index, 'gid'].values
yloc = self.nodes['y'].groupby(gids).median().rank(method='first').reindex(gids)
yloc.index = self.nodes.index
self.nodes['y'] = yloc
assert np.all([np.issubdtype(self.nodes[c].dtype, np.number) for c in ['x', 'y']])
def layout_best_fit(self, around, orientation='vertical'):
"""
Place nodes using graphviz.
For 'floating' (disconnected) nodes, force them at the bottom of the plot.
Rotate plot to best use the orientation
:return:
"""
floating_gids = self.get_floating_nodes()
self.layout_graphviz()
center = self._layout_around(around)
# make sure floating gids don't interfere when we are rotationg our graph
# their position will get set afterwards
self.nodes.loc[floating_gids, 'x'] = center[0]
self.nodes.loc[floating_gids, 'y'] = center[1]
self.layout_rotate_to_match(around=center, orientation=orientation)
linked_gids = self.get_linked_nodes()
bbox = (
np.minimum(self.nodes.loc[linked_gids, ['x', 'y']].min(), -10),
np.maximum(self.nodes.loc[linked_gids, ['x', 'y']].max(), +10),
)
x = np.linspace(bbox[0]['x'], bbox[1]['x'], len(floating_gids) + 2)[1:-1]
self.nodes.loc[floating_gids, 'x'] = x
y = bbox[0]['y'] - (bbox[1]['y'] - bbox[0]['y']) * .2
self.nodes.loc[floating_gids, 'y'] = y
def _layout_around(self, around):
"""
translate the "around" param of other functions
:param around:
None: the center of mass of the graph
tuple, list or array: the exact 2d coordinates
anything else: the ID of the node we want to center around
:return: array of 2 elements containing xy position
"""
xy = self.nodes[['x', 'y']].values
if around is None:
around = np.mean(xy, axis=0)
elif isinstance(around, (list, tuple, np.ndarray)):
around = np.array(around)
else:
            around = self.nodes.loc[around, ['x', 'y']].values.astype(float)
assert np.issubdtype(around.dtype, np.number)
return around
def sort_edges(self, by, ascending=True):
"""sort the edges by the given series. inplace"""
if isinstance(by, str):
by = self.edges[by]
if not isinstance(by, pd.Series):
            by = pd.Series(np.asarray(by), index=self.edges.index)
assert isinstance(by, pd.Series)
by = by.reindex(self.edges.index)
by = by.sort_values(ascending=ascending)
self.edges = self.edges.loc[by.index]
def layout_get_dists(self):
"""
get the distances for every node with respect to (0, 0)
:return:
"""
xy = self.nodes[['x', 'y']].values.T
dists = np.sqrt(np.sum(np.square(xy), axis=0))
return pd.Series(dists, index=self.nodes.index)
def layout_get_angles(self):
"""
get the angles for every node with respect to (0, 0)
:return:
"""
dists = self.layout_get_dists()
xy = self.nodes[['x', 'y']].values.T
vectors = xy / dists.values
angles = np.degrees(np.arctan2(vectors[1], vectors[0]))
return
|
pd.Series(angles, index=self.nodes.index)
|
pandas.Series
|
# pylint: disable=C0103,C0301,E0401
"""Process raw IEM data, output single optimized pickle file"""
import argparse
import os
import sys
import time
from multiprocessing import Pool
from pathlib import Path
import numpy as np
import pandas as pd
import ruptures as rpt
# note: this would not install properly on AWS EB!
# only needed for preprocessing, not the app, so
# it has been removed as a dependency from this project's Pipfile
# bias-correction = {git = "https://github.com/pankajkarman/bias_correction.git"}
# git+https://github.com/pankajkarman/bias_correction.git#egg=bias-correction
from bias_correction import BiasCorrection
from scipy.signal import find_peaks
# this hack is done to allow import from luts.py in app dir
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from luts import decades
def filter_spurious(station, tname="ts"):
"""Identify and remove spurious observations,
returns filtered data and flagged observations"""
station = station.set_index(tname)
# ignore missing speed data
ws_series = station[~np.isnan(station["sped"])]["sped"]
# identify and remove completely obvious peaks, to help with dip detection
obv_peaks, _ = find_peaks(ws_series, prominence=30, threshold=50)
# if-else in case no obvious spikes
if obv_peaks.shape[0] != 0:
obv_spikes = ws_series[obv_peaks]
ws_series = ws_series.drop(obv_spikes.index)
else:
obv_spikes = pd.Series()
# invert series, identify dips using less strict criteria
dip_peaks, _ = find_peaks(ws_series * -1, prominence=30, threshold=35)
# if-else in case no dips found
if dip_peaks.shape[0] != 0:
dips = ws_series[dip_peaks]
ws_series = ws_series.drop(dips.index)
else:
dips = pd.Series()
# identify less obvious peaks
peaks, properties = find_peaks(ws_series, prominence=25, width=(None, 2))
# combine with obvious peaks if present
if peaks.shape[0] != 0:
# condition on width_heights property to reduce sensitivty (see ancillary/raw_qc.ipynb)
spikes = pd.concat(
[obv_spikes, ws_series[peaks[properties["width_heights"] >= 18]]]
)
else:
spikes = pd.concat([obv_spikes, pd.Series()])
# subset the station data to keep these flagged observations,
# then remove from station data
if dips.size != 0:
dips = station.loc[dips.index]
station = station.drop(dips.index)
else:
# take empty slice
dips = station[station["station"] == "cats"]
if spikes.size != 0:
# subset station data frame for spikes and dips
spikes = station.loc[spikes.index]
station = station.drop(spikes.index)
else:
# take empty slice
        spikes = station[station["station"] == "dogs"]
return station.reset_index(), spikes.reset_index(), dips.reset_index()
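# Hedged usage sketch (not part of the original script): split one station's
# observations into the cleaned data plus the flagged spikes and dips; the input
# frame is assumed to carry the 'ts', 'sped' and 'station' columns used above.
def _example_filter_station(station_df):
    cleaned, spikes, dips = filter_spurious(station_df, tname="ts")
    print(f"kept {len(cleaned)} rows, flagged {len(spikes)} spikes and {len(dips)} dips")
    return cleaned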
def filter_to_hour(station):
"""Aggregate observations to nearest hour"""
# METAR reports are typically recorded close to a clock hour, either
# on the hour, or something like 1:07, or 12:55, etc. Instead of aggregating
# multiple observations, in this case other SPECIals recorded, just take
# the observation nearest to the hour.
# (this represents the most likely "routine" METAR record),
# and could help avoid potential sampling bias from SPECIals
# done by finding record with minimum timedelta from hour
station["ts"] = station["valid"].dt.round("H")
station["delta_dt"] = abs((station["ts"] - station["valid"]))
min_dt_station = (
station.groupby("ts")
.agg(min_delta_dt=
|
pd.NamedAgg("delta_dt", "min")
|
pandas.NamedAgg
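# Hedged stand-alone sketch of the nearest-to-the-hour selection described in
# filter_to_hour above; column names follow that snippet, but this helper is an
# illustration, not the original implementation: round each observation to the
# hour, then keep the single row closest to that hour.
def _example_nearest_to_hour(station):
    import pandas as pd
    station = station.copy()
    station["ts"] = station["valid"].dt.round("H")
    station["delta_dt"] = (station["ts"] - station["valid"]).abs()
    keep_idx = station.groupby("ts")["delta_dt"].idxmin()
    return station.loc[keep_idx]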
|
import pandas as pd
def make_descriptions(df):
df['split_name'] = df['name'].apply(lambda x: x.split('-'))
df['main_description'] = df['split_name'].apply(lambda x: x[-1])
df['detailed_description'] = (
df['split_name'].apply(
lambda x: ','.join(x[1:-1])
if len(x) > 2 else (
','.join(x[0:-1]) if len(x) > 1 else None
)
)
)
df['more_details'] = (
df['split_name'].apply(lambda x: x[0] if len(x) > 2 else None))
return df
def make_chart_data(df, column):
df_grouped = (
df
.groupby([column])
.duration.sum()
.reset_index()
.sort_values(by=['duration'], ascending=False)
)
# df_grouped['duration'] = (
# df_grouped['duration'].apply(lambda x: round(x, 2))
# )
df_grouped['time'] = (
df_grouped['duration']
.round()
.apply(pd.to_timedelta, unit='s')
).astype(str)
df_grouped.columns = ['name', 'duration', 'time']
return list(df_grouped.T.to_dict().values())
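# Hedged worked example (input values are made up): each record needs a 'duration'
# column in seconds plus the grouping column; the result is a list of dicts that is
# ready for charting, e.g.
#   records = [{'main_description': 'coding', 'duration': 3600.0},
#              {'main_description': 'user_idle', 'duration': 900.0}]
#   make_chart_data(pd.DataFrame(records), 'main_description')
#   # -> [{'name': 'coding', 'duration': 3600.0, 'time': '0 days 01:00:00'}, ...]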
def total_duration(df):
return str(pd.to_timedelta(df.duration.sum().round(), unit='s'))
def total_idle(df):
return str(
pd.to_timedelta(
df[df["main_description"] == 'user_idle']
.duration
.sum()
.round(), unit='s')
)
def metrics_bundle(data, drill_level):
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13:
|
pd.Timestamp("2013-05-14 00:00:00")
|
pandas.Timestamp
|
import glob
import datetime
import os
import pandas as pd
import numpy as np
import re
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
# pyinstaller --onefile --noconsole --icon GetCSV.ico Arca_GetCSVConverter_2-0-0.py
#for MMW 18-6 spreadsheets
probCol = False
#infer desktop
desktopPath = os.path.expanduser("~/Desktop/")
filelist=['']
probRecords = []
probColls = []
#filename = r'arms_modsonly_May9.csv'
col_names = ["IslandoraContentModel","BCRDHSimpleObjectPID",'imageLink','filename','directory','childKey','title', 'alternativeTitle', 'creator1', 'creator2','creator3']
col_names += ['corporateCreator1','corporateCreator2','contributor1','contributor2','corporateContributor1','publisher_original','publisher_location']
col_names += ['dateCreated','description','extent','topicalSubject1','topicalSubject2','topicalSubject3','topicalSubject4','topicalSubject5']
col_names += ['geographicSubject1','coordinates','personalSubject1','personalSubject2','corporateSubject1','corporateSubject2', 'dateIssued_start']
col_names += ['dateIssued_end','dateRange', 'frequency','genre','genreAuthority','type','internetMediaType','language1','language2','notes']
col_names += ['accessIdentifier','localIdentifier','ISBN','classification','URI']
col_names += ['source','rights','creativeCommons_URI','rightsStatement_URI','relatedItem_title','relatedItem_PID','recordCreationDate','recordOrigin']
pattern1 = r'^[A-Z][a-z]{2}-\d{2}$' #%b-%Y date (e.g. Jun-17)
pattern2 = r'^\d{2}-\d{2}-[1-2]\d{3}$'
contentModels = {
r"info:fedora/islandora:sp_large_image_cmodel": "Large Image",
r"info:fedora/islandora:sp_basic_image": "Basic Image",
r"info:fedora/islandora:bookCModel": "Book",
r"info:fedora/islandora:newspaperIssueCModel":"Newspaper - issue",
r"info:fedora/islandora:newspaperPageCModel":"Newspaper",
r"info:fedora/islandora:sp_PDF":"PDF",
r"info:fedora/islandora:sp-audioCModel":"Audio",
r"info:fedora/islandora:sp_videoCModel":"Video",
r"info:fedora/islandora:sp_compoundCModel":"Compound",
r"info:fedora/ir:citationCModel":"Citation"
}
def browse_button():
# Allow user to select a directory and store it in global var
# called folder_path1
lbl1['text'] = ""
csvname = filedialog.askopenfilename(initialdir = desktopPath,title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
if ".csv" not in csvname:
lbl1['text'] = "**Please choose a file with a .csv extension!"
else:
filelist[0] = csvname
lbl1['text'] = csvname
def splitMultiHdgs(hdgs):
if pd.notna(hdgs):
hdgs = hdgs.replace("\\,",";")
hdgs = hdgs.split(",")
newhdgs = []
for hdg in hdgs:
newhdg = hdg.replace(";", ",")
newhdgs.append(newhdg)
return newhdgs
else:
return None
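# Hedged worked example (illustrative value): escaped commas ("\,") survive the split
# as literal commas, e.g.
#   splitMultiHdgs(r"Rivers\, British Columbia,Irrigation")
#   # -> ['Rivers, British Columbia', 'Irrigation']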
def getMultiVals(item, string, df, pd):
hdgs = df.filter(like=string).columns
for hdg in hdgs:
vals = df.at[item.Index,hdg]
if pd.notna(vals):
vals = splitMultiHdgs(vals)
return vals
return None
def convert_date(dt_str, letter_date):
"""
Converts an invalid formatted date into a proper date for ARCA Mods
Correct format: Y-m-d
Fixes:
Incorrect format: m-d-Y
Incorrect format (letter date): m-d e.g. Jun-17
:param dt_str: the date string
:param letter_date: whether the string is a letter date. Letter date is something like Jun-17
:return: the correctly formatted date
"""
if letter_date:
        rev_date = datetime.datetime.strptime(dt_str, '%b-%y').strftime('%Y-%m') # convert date to YYYY-mm string format
rev_date_pts = rev_date.split("-")
year_num = int(rev_date_pts[0])
if year_num > 1999:
year_num = year_num - 100
year_str = str(year_num)
rev_date_pts[0] = year_str
revised = "-".join(rev_date_pts)
else:
        revised = datetime.datetime.strptime(dt_str, '%d-%m-%Y').strftime(
            '%Y-%m-%d') # convert date to YYYY-mm-dd string format
return revised
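# Hedged worked examples (values chosen for illustration):
#   convert_date('Jun-17', True)       # -> '1917-06'  (two-digit years parsed into the 2000s are pushed back a century)
#   convert_date('17-06-1925', False)  # -> '1925-06-17'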
def sortValues(lst):
for item in lst:
if pd.isna(item):
lst.remove(item)
lst = set(lst)
lst = list(lst)
return lst
def dropNullCols(df):
nullcols = []
for col in df.columns:
notNull = df[col].notna().sum()
if notNull < 1:
nullcols.append(col)
return nullcols
def convert():
probCol = False
df2 = pd.DataFrame(columns = col_names)
df2.append(pd.Series(), ignore_index=True)
f=filelist[0]
# if not os.path.exists(savePath): #if folder does not exist
# os.makedirs(savePath)
try:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_7')
except UnicodeDecodeError:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_8')
nullcols = dropNullCols(df)
df.drop(nullcols, axis=1, inplace=True)
i = 1
for item in df.itertuples():
#PID
df2.at[i, 'BCRDHSimpleObjectPID'] = item.PID
if 'mods_subject_name_personal_namePart_ms' in df.columns:
pNames = item.mods_subject_name_personal_namePart_ms
#ContentModel
cModel = item.RELS_EXT_hasModel_uri_s
df2.at[i,"IslandoraContentModel"] =contentModels[cModel]
#Local Identifier
if 'mods_identifier_local_ms' in df.columns:
localID = item.mods_identifier_local_ms
if pd.notna(localID) and localID != "None":
df2.at[i,'localIdentifier'] = localID
#Access Identifer
if 'mods_identifier_access_ms' in df.columns:
accessID = item.mods_identifier_access_ms
if pd.notna(accessID):
df2.at[i,'accessIdentifier'] = accessID
#Image Link
# Link to Image
PIDparts = item.PID.split(":")
repo = PIDparts[0] #repository code
num = PIDparts[1] #auto-generated accession number
imageLink = "https://bcrdh.ca/islandora/object/" + repo + "%3A" + num
df2.at[i, 'imageLink'] = imageLink
#Title
if 'mods_titleInfo_title_ms' in df.columns:
title = item.mods_titleInfo_title_ms
if pd.notna(title):
df2.at[i,'title'] = title.replace("\,",",")
#Alternative Title
if "mods_titleInfo_alternative_title_ms" in df.columns:
altTitle = item.mods_titleInfo_alternative_title_ms
if pd.notna(altTitle):
df2.at[i, 'alternativeTitle'] = altTitle.replace("\,",",")
#Date
if "mods_originInfo_dateIssued_ms" in df.columns:
dt = item.mods_originInfo_dateIssued_ms
if pd.notna(dt):
if (re.match(pattern1, dt)): #letter date, i.e. Jun-17
dt = convert_date(dt, True)
elif (re.match(pattern2, dt)): #reverse date
dt = convert_date(dt, False)
df2.at[i,'dateCreated'] = dt
#Date Issued Start
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms' in df.columns:
startDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms
if pd.notna(startDt):
df2.at[i,'dateIssued_start'] = startDt
#Date Issued End
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms' in df.columns:
endDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms
if pd.notna(endDt):
                df2.at[i,'dateIssued_end'] = endDt
#Publisher
if 'mods_originInfo_publisher_ms' in df.columns:
pub = item.mods_originInfo_publisher_ms
if pd.notna(pub):
df2.at[i, 'publisher_original'] = pub
#Publisher Location
if 'mods_originInfo_place_placeTerm_text_ms' in df.columns:
place = item.mods_originInfo_place_placeTerm_text_ms
if
|
pd.notna(place)
|
pandas.notna
|
"""Rules for conversion between SQL, pandas, and odbc data types."""
import pandas as pd
import pyodbc
rules = pd.DataFrame.from_records(
[
{
"sql_type": "bit",
"sql_category": "boolean",
"min_value": False,
"max_value": True,
"pandas_type": "boolean",
"odbc_type": pyodbc.SQL_BIT,
"odbc_size": 1,
"odbc_precision": 0,
},
{
"sql_type": "tinyint",
"sql_category": "exact numeric",
"min_value": 0,
"max_value": 255,
"pandas_type": "UInt8",
"odbc_type": pyodbc.SQL_TINYINT,
"odbc_size": 1,
"odbc_precision": 0,
},
{
"sql_type": "smallint",
"sql_category": "exact numeric",
"min_value": -(2 ** 15),
"max_value": 2 ** 15 - 1,
"pandas_type": "Int16",
"odbc_type": pyodbc.SQL_SMALLINT,
"odbc_size": 2,
"odbc_precision": 0,
},
{
"sql_type": "int",
"sql_category": "exact numeric",
"min_value": -(2 ** 31),
"max_value": 2 ** 31 - 1,
"pandas_type": "Int32",
"odbc_type": pyodbc.SQL_INTEGER,
"odbc_size": 4,
"odbc_precision": 0,
},
{
"sql_type": "bigint",
"sql_category": "exact numeric",
"min_value": -(2 ** 63),
"max_value": 2 ** 63 - 1,
"pandas_type": "Int64",
"odbc_type": pyodbc.SQL_BIGINT,
"odbc_size": 8,
"odbc_precision": 0,
},
        {
            "sql_type": "float",
            "sql_category": "approximate numeric",
            "min_value": -1.79e308,
            "max_value": 1.79e308,
            "pandas_type": "float64",
            "odbc_type": pyodbc.SQL_FLOAT,
            "odbc_size": 8,
            "odbc_precision": 53,
        },
{
"sql_type": "time",
"sql_category": "date time",
"min_value": pd.Timedelta("00:00:00.0000000"),
"max_value": pd.Timedelta("23:59:59.9999999"),
"pandas_type": "timedelta64[ns]",
"odbc_type": pyodbc.SQL_SS_TIME2,
"odbc_size": 16,
"odbc_precision": 7,
},
{
"sql_type": "date",
"sql_category": "date time",
"min_value": (pd.Timestamp.min + pd.DateOffset(days=1)).date(),
"max_value":
|
pd.Timestamp.max.date()
|
pandas.Timestamp.max.date
|
import pandas as pd
from pandas.api.types import is_numeric_dtype
from pathlib import Path
class Csv2Pandas(object):
"""
Private Properties and Getters
"""
@property
def df(self):
pass
@df.getter
def df(self):
return self.__df
@property
def file_name(self):
pass
    @file_name.getter
def file_name(self):
return self.__file_name
"""
Constructor
"""
def __init__(self, file_path = None):
self.file_path = file_path
def _fetch(self, file_path):
if not self.file_path:
raise AttributeError('file_path is not properly prepared.')
file_name = Path(self.file_path)
self.__file_name = file_name.stem
        self.__df = pd.read_csv(file_path, header=0)
def fetch(self):
self._fetch(self.file_path)
@staticmethod
def property(prop):
return None if
|
pd.isnull(prop)
|
pandas.isnull
|
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
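        # Statistical check: accumulate the positions of the 2 * n alternating signals over many runs
        # and verify that the k-th signal lands, on average, between k and k + 2 times 1000 / (2 * n + 1),
        # i.e. entries and exits are spread roughly uniformly over the index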
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
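        # Helper: run generate_ohlc_stop_exits() on the OHLC fixture with an out_dict and return the
        # raw result together with the filled stop_price and stop_type frames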
def _test_ohlc_stop_exits(**kwargs):
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.rate(group_by=group_by),
pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate')
)
def test_total_partitions(self):
assert mask['a'].vbt.signals.total_partitions() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(),
pd.Series([2, 2, 1], index=mask.columns, name='total_partitions')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total_partitions')
)
def test_partition_rate(self):
assert mask['a'].vbt.signals.partition_rate() == 1.0
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(),
pd.Series([1.0, 1.0, 1.0], index=mask.columns, name='partition_rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(group_by=group_by),
pd.Series([1.0, 1.0], index=['g1', 'g2'], name='partition_rate')
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'First Index',
'Last Index', 'Norm Avg Index [-1, 1]', 'Distance: Min',
'Distance: Max', 'Distance: Mean', 'Distance: Std', 'Total Partitions',
'Partition Rate [%]', 'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object')
pd.testing.assert_series_equal(
mask.vbt.signals.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
1.6666666666666667,
33.333333333333336,
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
1.6666666666666667,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'), 5, 2, 40.0,
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-04 00:00:00'), -0.25, 3.0,
3.0, 3.0, np.nan, 2, 100.0, 1.0, 1.0, 1.0, 0.0, 3.0, 3.0, 3.0, np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(other=mask['b'], from_other=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
0,
0.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'Total Overlapping',
'Overlapping Rate [%]', 'First Index', 'Last Index',
'Norm Avg Index [-1, 1]', 'Distance <- Other: Min',
'Distance <- Other: Max', 'Distance <- Other: Mean',
'Distance <- Other: Std', 'Total Partitions', 'Partition Rate [%]',
'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object'),
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
4,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
|
pd.Timedelta('3 days 00:00:00')
|
pandas.Timedelta
|
import numpy as np
import pandas as pd
class NB:
def __init__(self):
self.target = "" # name of the label
self.columns = pd.Index([]) # name of the features
self.num_cols = pd.Index([]) # name of numerical features
self.cat_cols = pd.Index([]) # name of categorical features
self.py = {} # P(y)
self.px = {} # P(xi|y)
def train(self, X: pd.DataFrame, y: pd.Series):
# Sanity check
assert all(X.index == y.index), "Indices mismatch"
# Drop rows with missing data
Xy = pd.concat([X, y], axis=1).dropna(axis=0, how='any')
_X, _y = Xy[X.columns], Xy[y.name]
# Initialization
self.target = _y.name
self.columns = _X.columns
self.num_cols = _X.select_dtypes(include='number').columns
        # categorical features are whatever remains after removing the numerical ones
        self.cat_cols = self.columns.drop(self.num_cols)
        # Estimate P(y)
y_counts = _y.value_counts()
y_total = y_counts.sum()
self.py = {y_val: y_count / y_total for y_val, y_count in y_counts.iteritems()}
        # Estimate the per-class statistics for P(xi|y)
for y_val, py in self.py.items():
self.px[y_val] = {}
X_given_y = _X[_y == y_val]
# Split X_given_y into numerical and categorical parts
X_num_given_y = X_given_y[self.num_cols]
X_cat_given_y = X_given_y[self.cat_cols]
# Numerical: mean and standard deviation
self.px[y_val]['numerical'] = X_num_given_y.describe().loc[['mean', 'std'], :]
# Categorical: frequency
self.px[y_val]['categorical'] = {feature: xi.value_counts(normalize=True)
for feature, xi in X_cat_given_y.iteritems()}
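    # predict() below calls LL_numerical() and LL_categorical(), which are not part of this excerpt.
    # The two methods that follow are an editor's minimal sketch of what they could look like given
    # the statistics stored by train(): the method names are taken from predict(), while the Gaussian
    # likelihood for numerical features and the frequency-plus-log-prior term for categorical ones
    # are assumptions, not the original implementation.
    def LL_numerical(self, x: pd.Series) -> pd.Series:
        # Gaussian log-likelihood of the numerical features of x under each class
        ll = {}
        for y_val, px in self.px.items():
            stats = px['numerical']
            mean, std = stats.loc['mean'], stats.loc['std']
            z = (x[self.num_cols].astype(float) - mean) / std
            ll[y_val] = float((-0.5 * np.log(2 * np.pi * std ** 2) - 0.5 * z ** 2).sum())
        return pd.Series(ll)
    def LL_categorical(self, x: pd.Series) -> pd.Series:
        # Log-frequency of the observed categorical values under each class plus log P(y);
        # a value never seen for a class contributes log(0) = -inf
        ll = {}
        with np.errstate(divide='ignore'):
            for y_val, px in self.px.items():
                total = np.log(self.py[y_val])
                for feature in self.cat_cols:
                    total += np.log(px['categorical'][feature].get(x[feature], 0.0))
                ll[y_val] = total
        return pd.Series(ll)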
def predict(self, X: pd.DataFrame, return_LL: bool = False):
r"""Predict the labels of all the instances in a feature matrix
Args:
X: pd.DataFrame
return_LL: bool
If set to True, return the log-posterior
Returns:
pred (return_LL=False)
pred, LL (return_LL=True)
"""
pred = []
LL = []
for index, x in X.iterrows():
# Compute log-likelihood
ll = self.LL_numerical(x) + self.LL_categorical(x)
# Find the most likely label
ll.sort_values(ascending=False, inplace=True)
LL.append(ll)
            if np.isinf(ll.values).any():  # x contains categorical values unseen during training, so log(0) = -inf appears
# Break ties by comparing P(y)
pred.append(pd.Series(self.py).sort_values(ascending=False).index[0])
else:
pred.append(ll.index[0])
# Clean up LL and pred
LL =
|
pd.concat(LL, axis=1)
|
pandas.concat
|
from ...utils import constants
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely
import pytest
from contextlib import ExitStack
from sklearn.metrics import mean_absolute_error
from ...preprocessing import detection, clustering
from ...models.sts_epr import STS_epr
from ...core.trajectorydataframe import TrajDataFrame
from ...models.markov_diary_generator import MarkovDiaryGenerator
def global_variables():
# tessellation
tess_polygons = [[[7.481, 45.184],
[7.481, 45.216],
[7.526, 45.216],
[7.526, 45.184],
[7.481, 45.184]],
[[7.481, 45.216],
[7.481, 45.247],
[7.526, 45.247],
[7.526, 45.216],
[7.481, 45.216]],
[[7.526, 45.184],
[7.526, 45.216],
[7.571, 45.216],
[7.571, 45.184],
[7.526, 45.184]],
[[7.526, 45.216],
[7.526, 45.247],
[7.571, 45.247],
[7.571, 45.216],
[7.526, 45.216]]]
geom = [shapely.geometry.Polygon(p) for p in tess_polygons]
tessellation = gpd.GeoDataFrame(geometry=geom, crs="EPSG:4326")
tessellation = tessellation.reset_index().rename(columns={"index": constants.TILE_ID})
relevance = np.random.randint(5, 10, size=len(tessellation))
tessellation[constants.RELEVANCE] = relevance
social_graph = [[0,1],[0,2],[0,3],[1,3],[2,4]]
# mobility diary generator
lats_lngs = np.array([[39.978253, 116.3272755],
[40.013819, 116.306532],
[39.878987, 116.1266865],
[40.013819, 116.306532],
[39.97958, 116.313649],
[39.978696, 116.3262205],
[39.98153775, 116.31079],
[39.978161, 116.3272425],
[38.978161, 115.3272425]])
traj = pd.DataFrame(lats_lngs, columns=[constants.LATITUDE, constants.LONGITUDE])
traj[constants.DATETIME] = pd.to_datetime([
'20130101 8:34:04', '20130101 10:34:08', '20130105 10:34:08',
'20130110 12:34:15', '20130101 1:34:28', '20130101 3:34:54',
'20130101 4:34:55', '20130105 5:29:12', '20130115 00:29:12'])
traj[constants.UID] = [1 for _ in range(5)] + [2 for _ in range(3)] + [3]
tdf = TrajDataFrame(traj)
ctdf = clustering.cluster(tdf)
mdg = MarkovDiaryGenerator()
mdg.fit(ctdf, 3, lid='cluster')
return tessellation, social_graph, mdg
tessellation, social_graph, mdg = global_variables()
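# module-level fixtures: the tessellation, the social graph and the Markov diary generator are built
# once here and reused as parametrize values by all the tests below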
sts_epr = STS_epr()
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph, 'random'])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('rsl', [True, False])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
# First test set: CORRECT arguments, no ERRORS expected (#test: 8)
def test_sts_generate_success(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
# Second test set: WRONG arguments, expected to FAIL
# test 2.1: wrong n_agents (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [-2,-1,0])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_sts_wrong_n_agents(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
# test 2.2: end_date prior to start_date (#test: 1)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_sts_wrong_dates(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.3: wrong rsl type (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [1, None, 'True'])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_sts_wrong_rsl_type(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.4: wrong type for the spatial_tessellation (#test: 5)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', ["", None, [], "tessellation", [1,2,3]])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_sts_wrong_tex_type(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.5: # of tiles in spatial_tessellation < 3 (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [pd.DataFrame(),tessellation[:1],tessellation[:2]])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_sts_wrong_tiles_num(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.6: wrong relevance column name (#test: 1)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [mdg])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True,])
@pytest.mark.parametrize('relevance_column',['rel'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=IndexError)
def test_sts_wrong_relevance_col_name(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# test 2.7: wrong type for the diary_generator (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('diary_generator', [[],None,pd.DataFrame()])
@pytest.mark.parametrize('social_graph', [social_graph])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('rsl', [True,])
@pytest.mark.parametrize('relevance_column',['relevance'])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_sts_wrong_diary_generator_type(start_date, end_date, spatial_tessellation, diary_generator,
social_graph, n_agents, rsl, relevance_column, random_state, show_progress):
sts_epr = STS_epr()
tdf = sts_epr.generate(start_date=start_date, end_date=end_date, spatial_tessellation=spatial_tessellation,
social_graph=social_graph, diary_generator=diary_generator, n_agents= n_agents, rsl=rsl,
relevance_column=relevance_column, random_state=random_state, show_progress=show_progress)
# Third test set: assert the correctness of the model's functions
def all_equal(a, b, threshold=1e-3):
return mean_absolute_error(a, b) <= threshold
def correcteness_set_exp(a,b):
    # exploration is correct only if no chosen location (b[i]>0) was already visited (a[i]>0)
    for i in range(len(b)):
        if a[i]>0 and b[i]>0:
            return False
    return True
def correcteness_set_ret(a, b):
    # a return is correct only if every chosen location (b[i]>0) was already visited (a[i]>0)
    for i in range(len(b)):
        if b[i]>0 and a[i]==0:
            return False
    return True
def correcteness_set_exp_social(lva,lvc,choices):
    # social exploration: chosen locations must be unvisited by the agent (lva==0)
    # and visited by the contact (lvc>0)
    for i in range(len(choices)):
        if choices[i]>0:
            if not (lva[i]==0 and lvc[i]>0):
                return False
    return True
def correcteness_set_ret_social(lva,lvc,choices):
    # social return: chosen locations must be visited by both the agent (lva>0) and the contact (lvc>0)
    for i in range(len(choices)):
        if choices[i]>0:
            if not (lva[i]>0 and lvc[i]>0):
                return False
    return True
# test 3.1: correct random_weighted_choice (#test: 1)
@pytest.mark.parametrize('size', [1000])
@pytest.mark.parametrize('n_picks', [int(1e4)])
def test_weighted_random_choice(size,n_picks):
np.random.seed(24)
sts_epr = STS_epr()
weigths = np.random.randint(0, 10, size=size)
theoretical = weigths/np.sum(weigths)
empirical = [0]*len(weigths)
for j in range(n_picks):
i = sts_epr.random_weighted_choice(weigths)
empirical[i]+=1
empirical = empirical/np.sum(empirical)
assert(all_equal(theoretical,empirical))
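# A minimal sketch (an assumption, not the STS_epr source) of the behaviour that
# test_weighted_random_choice verifies: an index i is drawn with probability
# proportional to weights[i], so empirical pick frequencies converge to the
# normalized weights.
def _weighted_choice_sketch(weights):
    probs = np.asarray(weights, dtype=float)
    probs = probs / probs.sum()          # normalize to a probability vector
    return np.random.choice(len(probs), p=probs)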
# test 3.2: correct exploration choices (#test: 1)
# create a fake location vector of size n for the agent A (id=0)
# m elements = 0 and j elements > 0, m+j=n
# RETURN
@pytest.mark.parametrize('m', [3])
@pytest.mark.parametrize('j', [1])
@pytest.mark.parametrize('n_picks', [int(1e4)])
@pytest.mark.parametrize('start_date', [
|
pd.to_datetime('2020/01/01 08:00:00')
|
pandas.to_datetime
|
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
|
tm.assert_index_equal(result, expected)
|
pandas.util.testing.assert_index_equal
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from cxplain import CXPlain  # needed below for CXPlain.load(...) / explain(...)
# NOTE: `dataset` is assumed to be a project-local helper module that exposes
# filter_and_normalize_data(drug) and a `genes` attribute; it is used below.
import dataset
TCGA_GENE_EXPRESSION = '../../drp-data/preprocessed/gdsc_tcga/tcga_labeled_log2_gene_expr.csv'
tcga = pd.read_csv(TCGA_GENE_EXPRESSION, index_col=0)
genes = tcga.index
print("feature genes:", len(genes))
pathway_file = '../../drp-data/pathways/9606.enrichr_pathway.edge'
pathway = pd.read_csv(pathway_file, sep='\t', header=None)
print("pathways:", pathway[0].nunique())
print("pathway genes:", pathway[1].nunique())
ppi_file = '../../drp-data/pathways/9606.STRING_experimental.edge'
ppi = pd.read_csv(ppi_file, sep='\t', header=None)
print("PPI original edges:", len(ppi))
ppi['norm_score'] = ppi[2]/ppi[2].max()
ppi = ppi.loc[ppi['norm_score'] > 0.5]
print("PPI filtered edges:", len(ppi))
nodes = list(set(ppi[0]).union(set(ppi[1])))
print("PPI nodes:", len(nodes) )
mean_attribution_file = '../../'
drugs = [
'bleomycin',
'cisplatin',
'cyclophosphamide',
'docetaxel',
'doxorubicin',
'etoposide',
'gemcitabine',
'irinotecan',
'oxaliplatin',
'paclitaxel',
'pemetrexed',
'tamoxifen',
'temozolomide',
'vinorelbine']
folder = 'CX_ens10'
attr_dict = {}
for i, drug in enumerate(drugs):
print(drug)
_, _, _, test_tcga_expr = dataset.filter_and_normalize_data(drug)
exp = CXPlain.load('gene_finding/results/%s/%s/explainer'%(folder, drug), custom_model_loader=None, relpath=True)
attr,_ = exp.explain(test_tcga_expr.values)
attr = pd.DataFrame(attr, index=test_tcga_expr.index, columns=dataset.genes)
attr_dict[drug]=attr
fig, axes = plt.subplots(7, 2, figsize=(14, 35))
writer_a = pd.ExcelWriter('gene_finding/results/%s/top_genes_mean_aggregation.xlsx'%folder, engine='xlsxwriter')
# writer_b = pd.ExcelWriter('gene_finding/results/%s/all_attributions.xlsx'%folder, engine='xlsxwriter')
conv =
|
pd.DataFrame(index=dataset.genes, columns=['hgnc'])
|
pandas.DataFrame
|
"""A collection of pandas data interfaces to a project instance."""
from __future__ import absolute_import
import os.path as osp
from glob import glob
import warnings
try:
import pandas as pd
except ImportError:
raise ImportError('The pandas package is required for this plugin. '
'Try pip install pandas')
class ProjectOrRunData(pd.DataFrame):
"""
A representation of data read from either the project, a Run or path.
"""
path = None
plugin = []
def __init__(self, projectrunorpath):
from modelmanager.project import Project
# init DataFrame
|
pd.DataFrame.__init__(self)
|
pandas.DataFrame.__init__
|
import pandas as pd
data = pd.read_excel('Questions.xlsx')
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
from layers.makeup_dataframe import merge_initialized_table
from libs.extensions import Extension
import pandas as pd
import numpy as np
class Yjzt(Extension):
"""预警状态"""
"""
计算公式:
当同比或环比的绝对值超过20%即为告警,绝对值超过10%即为异常,否则为平稳
"""
def __init__(self, apis_copy, apis, *args, **kwargs):
# 执行父类方法,获得self.apis/self.apis_copy/self.value
super(Yjzt, self).__init__(apis_copy, apis, *args, **kwargs) # 执行父类方法,获得self.apis/self.apis_copy
def before_search(self):
self._before_search() # 此时self.before_waiting_for_search中就有了最基本的内容
condition_tb = Yjzt.params_parse_tb(self.apis.copy())
condition_hb = Yjzt.params_parse_hb(self.apis.copy())
condition_tb_container = self.before_waiting_for_search[0]["conditions"].copy()
condition_hb_container = self.before_waiting_for_search[0]["conditions"].copy()
condition_tb_container.append(condition_tb)
condition_hb_container.append(condition_hb)
        # waiting_for_search = [{"conditions": [current, previous period for YoY]}, {"conditions": [current, previous period for MoM]}]
self.before_waiting_for_search.append(self.before_waiting_for_search[0].copy())
self.before_waiting_for_search[0]["conditions"] = condition_tb_container
self.before_waiting_for_search[1]["conditions"] = condition_hb_container
self.code, self.msg, self.waiting_for_search = Yjzt.get_waiting_for_search(self.before_waiting_for_search)
def after_search(self):
"""
self.db_results: [[df_my],[df_bmy],[df_jbmy]]
:return:
"""
        # Aggregate the query results
self.db_results[0][0] = Extension.groupby_and_sum(self.db_results[0][0], self.value)
self.db_results[0][1] = Extension.groupby_and_sum(self.db_results[0][1], self.value)
self.db_results[1][0] = Extension.groupby_and_sum(self.db_results[1][0], self.value)
self.db_results[1][1] = Extension.groupby_and_sum(self.db_results[1][1], self.value)
if isinstance(self.db_results[0][0], pd.DataFrame) and self.db_results[0][0].shape[1] == 1 and self.db_results[0][0][self.value][0] is None:
self.db_results[0][0] = np.int32(0)
if isinstance(self.db_results[0][1], pd.DataFrame) and self.db_results[0][1].shape[1] == 1 and self.db_results[0][1][self.value][0] is None:
self.db_results[0][1] = np.int32(0)
if isinstance(self.db_results[1][0], pd.DataFrame) and self.db_results[1][0].shape[1] == 1 and self.db_results[1][0][self.value][0] is None:
self.db_results[1][0] = np.int32(0)
if isinstance(self.db_results[1][1], pd.DataFrame) and self.db_results[1][1].shape[1] == 1 and self.db_results[1][1][self.value][0] is None:
self.db_results[1][1] = np.int32(0)
df_tb, df_hb = Yjzt.calculate_tb_and_hb(self.db_results, self.apis_copy)
self.apis_copy["value"] = "yjzt"
if not isinstance(df_tb, pd.DataFrame) or not isinstance(df_hb, pd.DataFrame):
res = "平稳"
if abs(df_tb) > 0.2 or abs(df_hb) > 0.2:
res = "告警"
elif abs(df_tb) > 0.1 or abs(df_hb) > 0.1:
res = "异常"
self.df =
|
pd.DataFrame({"yjzt": [res]})
|
pandas.DataFrame
|
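# Worked example (a sketch with hypothetical standalone inputs, not part of the
# original class) of the warning rule described in Yjzt's docstring: tb and hb
# are the year-over-year and period-over-period relative changes.
def classify_warning_status(tb, hb):
    if abs(tb) > 0.2 or abs(hb) > 0.2:
        return "告警"   # alert
    if abs(tb) > 0.1 or abs(hb) > 0.1:
        return "异常"   # abnormal
    return "平稳"       # stable
# classify_warning_status(0.25, 0.05) -> "告警"; classify_warning_status(0.12, 0.0) -> "异常"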
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import aampi, core, config
import pytest
import naive
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
substitution_values = [np.nan, np.inf]
def test_aampi_int_input():
with pytest.raises(TypeError):
aampi(np.arange(10), 5)
def test_aampi_self_join():
m = 3
for p in [1.0, 2.0, 3.0]:
seed = np.random.randint(100000)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
stream = aampi(T, m, egress=False, p=p)
for i in range(34):
t = np.random.rand()
stream.update(t)
comp_P = stream.P_
comp_I = stream.I_
comp_left_P = stream.left_P_
comp_left_I = stream.left_I_
ref_mp = naive.aamp(stream.T_, m, p=p)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_P = np.full(ref_P.shape, np.inf)
ref_left_I = ref_mp[:, 2]
for i, j in enumerate(ref_left_I):
if j >= 0:
ref_left_P[i] = np.linalg.norm(
stream.T_[i : i + m] - stream.T_[j : j + m], ord=p
)
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
T = pd.Series(T)
stream = aampi(T, m, egress=False, p=p)
for i in range(34):
t = np.random.rand()
stream.update(t)
comp_P = stream.P_
comp_I = stream.I_
comp_left_P = stream.left_P_
comp_left_I = stream.left_I_
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
def test_aampi_self_join_egress():
m = 3
for p in [1.0, 2.0, 3.0]:
seed = np.random.randint(100000)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
ref_mp = naive.aampi_egress(T, m, p=p)
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
stream = aampi(T, m, egress=True, p=p)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
for i in range(34):
t = np.random.rand()
ref_mp.update(t)
stream.update(t)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
np.random.seed(seed)
T = np.random.rand(n)
T = pd.Series(T)
ref_mp = naive.aampi_egress(T, m, p=p)
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
stream = aampi(T, m, egress=True, p=p)
comp_P = stream.P_.copy()
comp_I = stream.I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
for i in range(34):
t = np.random.rand()
ref_mp.update(t)
stream.update(t)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_init_nan_inf_self_join(substitute, substitution_locations):
m = 3
seed = np.random.randint(100000)
# seed = 58638
for substitution_location in substitution_locations:
np.random.seed(seed)
n = 30
T = np.random.rand(n)
if substitution_location == -1:
substitution_location = T.shape[0] - 1
T[substitution_location] = substitute
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
comp_P = stream.P_
comp_I = stream.I_
stream.T_[substitution_location] = substitute
ref_mp = naive.aamp(stream.T_, m)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
if substitution_location == -1: # pragma: no cover
substitution_location = T.shape[0] - 1
T[substitution_location] = substitute
T = pd.Series(T)
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
comp_P = stream.P_
comp_I = stream.I_
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_init_nan_inf_self_join_egress(substitute, substitution_locations):
m = 3
seed = np.random.randint(100000)
# seed = 58638
for substitution_location in substitution_locations:
np.random.seed(seed)
n = 30
T = np.random.rand(n)
if substitution_location == -1:
substitution_location = T.shape[0] - 1
T[substitution_location] = substitute
ref_mp = naive.aampi_egress(T, m)
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
stream = aampi(T, m, egress=True)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
for i in range(34):
t = np.random.rand()
ref_mp.update(t)
stream.update(t)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
T = pd.Series(T)
ref_mp = naive.aampi_egress(T, m)
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
stream = aampi(T, m, egress=True)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_left_P)
for i in range(34):
t = np.random.rand()
ref_mp.update(t)
stream.update(t)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_stream_nan_inf_self_join(substitute, substitution_locations):
m = 3
seed = np.random.randint(100000)
for substitution_location in substitution_locations:
np.random.seed(seed)
n = 30
T = np.random.rand(64)
stream = aampi(T[:n], m, egress=False)
if substitution_location == -1:
substitution_location = T[n:].shape[0] - 1
T[n:][substitution_location] = substitute
for t in T[n:]:
stream.update(t)
comp_P = stream.P_
comp_I = stream.I_
stream.T_[n:][substitution_location] = substitute
ref_mp = naive.aamp(stream.T_, m)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
np.random.seed(seed)
T = np.random.rand(64)
stream = aampi(pd.Series(T[:n]), m, egress=False)
if substitution_location == -1: # pragma: no cover
substitution_location = T[n:].shape[0] - 1
T[n:][substitution_location] = substitute
for t in T[n:]:
stream.update(t)
comp_P = stream.P_
comp_I = stream.I_
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_stream_nan_inf_self_join_egress(substitute, substitution_locations):
m = 3
seed = np.random.randint(100000)
for substitution_location in substitution_locations:
np.random.seed(seed)
n = 30
T = np.random.rand(64)
ref_mp = naive.aampi_egress(T[:n], m)
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
stream = aampi(T[:n], m, egress=True)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
if substitution_location == -1:
substitution_location = T[n:].shape[0] - 1
T[n:][substitution_location] = substitute
for t in T[n:]:
ref_mp.update(t)
stream.update(t)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
np.random.seed(seed)
T = np.random.rand(64)
ref_mp = naive.aampi_egress(T[:n], m)
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
stream = aampi(pd.Series(T[:n]), m, egress=True)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
if substitution_location == -1: # pragma: no cover
substitution_location = T[n:].shape[0] - 1
T[n:][substitution_location] = substitute
for t in T[n:]:
ref_mp.update(t)
stream.update(t)
comp_P = stream.P_.copy()
comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
comp_left_I = stream.left_I_
ref_P = ref_mp.P_.copy()
ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
ref_left_I = ref_mp.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
def test_aampi_constant_subsequence_self_join():
m = 3
seed = np.random.randint(100000)
np.random.seed(seed)
T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
comp_P = stream.P_
# comp_I = stream.I_
ref_mp = naive.aamp(stream.T_, m)
ref_P = ref_mp[:, 0]
# ref_I = ref_mp[:, 1]
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
# npt.assert_almost_equal(ref_I, comp_I)
np.random.seed(seed)
T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
T = pd.Series(T)
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
comp_P = stream.P_
# comp_I = stream.I_
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
# npt.assert_almost_equal(ref_I, comp_I)
def test_aampi_constant_subsequence_self_join_egress():
m = 3
seed = np.random.randint(100000)
np.random.seed(seed)
T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
ref_mp = naive.aampi_egress(T, m)
ref_P = ref_mp.P_.copy()
# ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
# ref_left_I = ref_mp.left_I_
stream = aampi(T, m, egress=True)
comp_P = stream.P_.copy()
# comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
# comp_left_I = stream.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
# npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
# npt.assert_almost_equal(ref_left_I, comp_left_I)
for i in range(34):
t = np.random.rand()
ref_mp.update(t)
stream.update(t)
comp_P = stream.P_.copy()
# comp_I = stream.I_
comp_left_P = stream.left_P_.copy()
# comp_left_I = stream.left_I_
ref_P = ref_mp.P_.copy()
# ref_I = ref_mp.I_
ref_left_P = ref_mp.left_P_.copy()
# ref_left_I = ref_mp.left_I_
naive.replace_inf(ref_P)
naive.replace_inf(ref_left_P)
naive.replace_inf(comp_P)
naive.replace_inf(comp_left_P)
npt.assert_almost_equal(ref_P, comp_P)
# npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_P, comp_left_P)
# npt.assert_almost_equal(ref_left_I, comp_left_I)
np.random.seed(seed)
T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
T =
|
pd.Series(T)
|
pandas.Series
|
import pandas as pd
import pytest
import numpy as np
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, PANDAS_VERSION
N = 40
df = pd.DataFrame(
{
"a": np.random.randn(N).cumsum(),
"b": np.random.randint(100, size=(N,)),
"c": np.random.randint(100, size=(N,)),
"d": np.random.randint(100, size=(N,)),
"e": np.random.randint(100, size=(N,)),
}
)
ddf = dd.from_pandas(df, 3)
idx = (
pd.date_range("2016-01-01", freq="3s", periods=100)
| pd.date_range("2016-01-01", freq="5s", periods=100)
)[:N]
ts = pd.DataFrame(
{
"a": np.random.randn(N).cumsum(),
"b": np.random.randint(100, size=(N,)),
"c": np.random.randint(100, size=(N,)),
"d": np.random.randint(100, size=(N,)),
"e": np.random.randint(100, size=(N,)),
},
index=idx,
)
dts = dd.from_pandas(ts, 3)
def shifted_sum(df, before, after, c=0):
a = df.shift(before)
b = df.shift(-after)
return df + a + b + c
def ts_shifted_sum(df, before, after, c=0):
a = df.shift(before.seconds)
b = df.shift(-after.seconds)
return df + a + b + c
@pytest.mark.parametrize("npartitions", [1, 4])
def test_map_overlap(npartitions):
ddf = dd.from_pandas(df, npartitions)
for before, after in [(0, 3), (3, 0), (3, 3), (0, 0)]:
# DataFrame
res = ddf.map_overlap(shifted_sum, before, after, before, after, c=2)
sol = shifted_sum(df, before, after, c=2)
assert_eq(res, sol)
# Series
res = ddf.b.map_overlap(shifted_sum, before, after, before, after, c=2)
sol = shifted_sum(df.b, before, after, c=2)
assert_eq(res, sol)
def test_map_overlap_names():
npartitions = 3
ddf = dd.from_pandas(df, npartitions)
res = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)
res2 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)
assert set(res.dask) == set(res2.dask)
res3 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=3)
assert res3._name != res._name
# Difference is just the final map
diff = set(res3.dask).difference(res.dask)
assert len(diff) == npartitions
res4 = ddf.map_overlap(shifted_sum, 3, 0, 0, 3, c=2)
assert res4._name != res._name
def test_map_overlap_errors():
# Non-integer
with pytest.raises(ValueError):
ddf.map_overlap(shifted_sum, 0.5, 3, 0, 2, c=2)
# Negative
with pytest.raises(ValueError):
ddf.map_overlap(shifted_sum, 0, -5, 0, 2, c=2)
# Partition size < window size
with pytest.raises(NotImplementedError):
ddf.map_overlap(shifted_sum, 0, 100, 0, 100, c=2).compute()
# Offset with non-datetime
with pytest.raises(TypeError):
ddf.map_overlap(shifted_sum, pd.Timedelta("1s"), pd.Timedelta("1s"), 0, 2, c=2)
def test_map_overlap_provide_meta():
df = pd.DataFrame(
{"x": [1, 2, 4, 7, 11], "y": [1.0, 2.0, 3.0, 4.0, 5.0]}
).rename_axis("myindex")
ddf = dd.from_pandas(df, npartitions=2)
# Provide meta spec, but not full metadata
res = ddf.map_overlap(
lambda df: df.rolling(2).sum(), 2, 0, meta={"x": "i8", "y": "i8"}
)
sol = df.rolling(2).sum()
assert_eq(res, sol)
def mad(x):
return np.fabs(x - x.mean()).mean()
rolling_method_args_check_less_precise = [
("count", (), False),
("sum", (), False),
("mean", (), False),
("median", (), False),
("min", (), False),
("max", (), False),
("std", (), True),
("var", (), True),
("skew", (), True), # here and elsewhere, results for kurt and skew are
("kurt", (), True), # checked with check_less_precise=True so that we are
# only looking at 3ish decimal places for the equality check
# rather than 5ish. I have encountered a case where a test
# seems to have failed due to numerical problems with kurt.
# So far, I am only weakening the check for kurt and skew,
# as they involve third degree powers and higher
("quantile", (0.38,), False),
("apply", (mad,), False),
]
@pytest.mark.parametrize(
"method,args,check_less_precise", rolling_method_args_check_less_precise
)
@pytest.mark.parametrize("window", [1, 2, 4, 5])
@pytest.mark.parametrize("center", [True, False])
def test_rolling_methods(method, args, window, center, check_less_precise):
# DataFrame
prolling = df.rolling(window, center=center)
drolling = ddf.rolling(window, center=center)
if method == "apply":
kwargs = {"raw": False}
else:
kwargs = {}
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
# Series
prolling = df.a.rolling(window, center=center)
drolling = ddf.a.rolling(window, center=center)
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
if PANDAS_VERSION <= "0.25.0":
filter_panel_warning = pytest.mark.filterwarnings(
"ignore::DeprecationWarning:pandas[.*]"
)
else:
filter_panel_warning = lambda f: f
@filter_panel_warning
@pytest.mark.parametrize("window", [1, 2, 4, 5])
@pytest.mark.parametrize("center", [True, False])
def test_rolling_cov(window, center):
# DataFrame
prolling = df.drop("a", 1).rolling(window, center=center)
drolling = ddf.drop("a", 1).rolling(window, center=center)
assert_eq(prolling.cov(), drolling.cov())
# Series
prolling = df.b.rolling(window, center=center)
drolling = ddf.b.rolling(window, center=center)
assert_eq(prolling.cov(), drolling.cov())
def test_rolling_raises():
df = pd.DataFrame(
{"a": np.random.randn(25).cumsum(), "b": np.random.randint(100, size=(25,))}
)
ddf = dd.from_pandas(df, 3)
pytest.raises(ValueError, lambda: ddf.rolling(1.5))
pytest.raises(ValueError, lambda: ddf.rolling(-1))
pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=1.2))
pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=-2))
pytest.raises(ValueError, lambda: ddf.rolling(3, axis=10))
pytest.raises(ValueError, lambda: ddf.rolling(3, axis="coulombs"))
pytest.raises(NotImplementedError, lambda: ddf.rolling(100).mean().compute())
def test_rolling_names():
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.rolling(2).sum().dask) == sorted(a.rolling(2).sum().dask)
def test_rolling_axis():
df = pd.DataFrame(np.random.randn(20, 16))
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(df.rolling(3, axis=0).mean(), ddf.rolling(3, axis=0).mean())
assert_eq(df.rolling(3, axis=1).mean(), ddf.rolling(3, axis=1).mean())
assert_eq(
df.rolling(3, min_periods=1, axis=1).mean(),
ddf.rolling(3, min_periods=1, axis=1).mean(),
)
assert_eq(
df.rolling(3, axis="columns").mean(), ddf.rolling(3, axis="columns").mean()
)
assert_eq(df.rolling(3, axis="rows").mean(), ddf.rolling(3, axis="rows").mean())
s = df[3]
ds = ddf[3]
assert_eq(s.rolling(5, axis=0).std(), ds.rolling(5, axis=0).std())
def test_rolling_partition_size():
df = pd.DataFrame(np.random.randn(50, 2))
ddf = dd.from_pandas(df, npartitions=5)
for obj, dobj in [(df, ddf), (df[0], ddf[0])]:
assert_eq(obj.rolling(10).mean(), dobj.rolling(10).mean())
assert_eq(obj.rolling(11).mean(), dobj.rolling(11).mean())
with pytest.raises(NotImplementedError):
dobj.rolling(12).mean().compute()
def test_rolling_repr():
ddf = dd.from_pandas(pd.DataFrame([10] * 30), npartitions=3)
res = repr(ddf.rolling(4))
assert res == "Rolling [window=4,center=False,axis=0]"
def test_time_rolling_repr():
res = repr(dts.rolling("4s"))
assert res == "Rolling [window=4000000000,center=False,win_type=freq,axis=0]"
def test_time_rolling_constructor():
result = dts.rolling("4s")
assert result.window == "4s"
assert result.min_periods is None
assert result.win_type is None
assert result._win_type == "freq"
assert result._window == 4000000000 # ns
assert result._min_periods == 1
@pytest.mark.parametrize(
"method,args,check_less_precise", rolling_method_args_check_less_precise
)
@pytest.mark.parametrize("window", ["1S", "2S", "3S", pd.offsets.Second(5)])
def test_time_rolling_methods(method, args, window, check_less_precise):
# DataFrame
if method == "apply":
kwargs = {"raw": False}
else:
kwargs = {}
prolling = ts.rolling(window)
drolling = dts.rolling(window)
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
# Series
prolling = ts.a.rolling(window)
drolling = dts.a.rolling(window)
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
@filter_panel_warning
@pytest.mark.parametrize("window", ["1S", "2S", "3S", pd.offsets.Second(5)])
def test_time_rolling_cov(window):
# DataFrame
prolling = ts.drop("a", 1).rolling(window)
drolling = dts.drop("a", 1).rolling(window)
assert_eq(prolling.cov(), drolling.cov())
# Series
prolling = ts.b.rolling(window)
drolling = dts.b.rolling(window)
assert_eq(prolling.cov(), drolling.cov())
@pytest.mark.parametrize(
"window,N",
[("1s", 10), ("2s", 10), ("10s", 10), ("10h", 10), ("10s", 100), ("10h", 100)],
)
def test_time_rolling_large_window_fixed_chunks(window, N):
df = pd.DataFrame(
{
"a":
|
pd.date_range("2016-01-01 00:00:00", periods=N, freq="1s")
|
pandas.date_range
|
from pathlib import Path
import numpy as np
import pandas as pd
import time
import pickle
import json
import h5py
import sys
import traceback
import warnings
import Analyses.spike_functions as spike_funcs
import Analyses.spatial_functions as spatial_funcs
import Analyses.open_field_functions as of_funcs
import Pre_Processing.pre_process_functions as pp_funcs
from Utils.robust_stats import robust_zscore
import Analyses.tree_maze_functions as tmf
import Analyses.plot_functions as pf
import scipy.signal as signal
"""
Classes in this file will have several retrieval processes to acquire the required information for each
subject and session.
:class SubjectInfo
-> class that takes a subject as an input. It contains general information about what processes have been performed,
the clusters, and, importantly, all the session paths. The contents of this class are saved as a pickle in the results
folder.
:class SubjectSessionInfo
-> child class of SubjectInfo that additionally takes a session as an input. This class contains session-specific
retrieval methods. Low-level things, like reading position (e.g. 'get_track_data'), are self-contained in the class.
Higher-level functions, like 'get_spikes', are outsourced to the appropriate submodules in the Analyses folder.
The first time a retrieval method is called, the call saves its contents according to the paths variable;
otherwise the contents are loaded from the existing data instead of being recomputed. The exception is the get_time
method, as this is easily regenerated on each call. A short usage sketch follows this docstring.
"""
class SummaryInfo:
subjects = ['Li', 'Ne', 'Cl', 'Al', 'Ca', 'Mi']
min_n_units = 1
min_n_trials = 50 # task criteria
min_pct_coverage = 0.75 # open field criteria
invalid_sessions = ['Li_OF_080718']
figure_names = [f"f{ii}" for ii in range(5)]
_root_paths = dict(GD=Path("/home/alexgonzalez/google-drive/TreeMazeProject/"),
BigPC=Path("/mnt/Data_HD2T/TreeMazeProject/"))
def __init__(self, data_root='BigPC'):
self.main_path = self._root_paths[data_root]
self.paths = self._get_paths()
self.unit_table = self.get_unit_table()
self.analyses_table = self.get_analyses_table()
self.valid_track_table = self.get_track_validity_table()
self.sessions_by_subject = {}
self.tasks_by_subject = {}
for s in self.subjects:
self.sessions_by_subject[s] = self.unit_table[self.unit_table.subject == s].session.unique()
self.tasks_by_subject[s] = self.unit_table[self.unit_table.subject == s].task.unique()
def run_analyses(self, task='all', which='all', verbose=False, overwrite=False):
interrupt_flag = False
for subject in self.subjects:
if not interrupt_flag:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
try:
if task == 'all':
pass
elif task not in session:
continue
else:
pass
if verbose:
t0 = time.time()
print(f'Processing Session {session}')
session_info = SubjectSessionInfo(subject, session)
session_info.run_analyses(overwrite=overwrite, which=which, verbose=verbose)
if verbose:
t1 = time.time()
print(f"Session Processing Completed: {t1 - t0:0.2f}s")
print()
else:
print(".", end='')
except KeyboardInterrupt:
interrupt_flag = True
break
except ValueError:
pass
except FileNotFoundError:
pass
except:
if verbose:
traceback.print_exc(file=sys.stdout)
pass
if verbose:
print(f"Subject {subject} Analyses Completed.")
def get_analyses_table(self, overwrite=False):
if not self.paths['analyses_table'].exists() or overwrite:
analyses_table = pd.DataFrame()
for subject in self.subjects:
analyses_table = analyses_table.append(SubjectInfo(subject).get_sessions_analyses())
analyses_table.to_csv(self.paths['analyses_table'])
else:
analyses_table = pd.read_csv(self.paths['analyses_table'], index_col=0)
self.analyses_table = analyses_table
return analyses_table
def get_track_validity_table(self, overwrite=False):
if not self.paths['valid_track_table'].exists() or overwrite:
valid_track_table = pd.DataFrame()
for subject in self.subjects:
valid_track_table = valid_track_table.append(SubjectInfo(subject).valid_track_table)
valid_track_table.to_csv(self.paths['valid_track_table'])
else:
valid_track_table = pd.read_csv(self.paths['valid_track_table'], index_col=0)
return valid_track_table
def get_behav_perf(self, overwrite=False):
if not self.paths['behavior'].exists() or overwrite:
perf = pd.DataFrame()
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if 'T3' in session:
try:
session_info = SubjectSessionInfo(subject, session)
b = session_info.get_event_behavior()
sp = b.get_session_perf()
sp['session'] = session
sp['task'] = session_info.task
sp['subject'] = subject
sp['n_units'] = session_info.n_units
sp['n_cells'] = session_info.n_cells
sp['n_mua'] = session_info.n_mua
perf = pd.concat((perf, sp), ignore_index=True)
except:
pass
perf.to_csv(self.paths['behavior'])
else:
perf = pd.read_csv(self.paths['behavior'], index_col=0)
return perf
def _get_paths(self, root_path=None):
if root_path is None:
results_path = self.main_path / 'Results_Summary'
figures_path = self.main_path / 'Figures'
else:
results_path = root_path / 'Results_Summary'
figures_path = root_path / 'Figures'
paths = dict(
analyses_table=results_path / 'analyses_table.csv',
valid_track_table=results_path / 'valid_track_table.csv',
behavior=results_path / 'behavior_session_perf.csv',
units=results_path / 'all_units_table.csv',
of_metric_scores=results_path / 'of_metric_scores_summary_table.csv',
of_model_scores=results_path / 'of_model_scores_summary_table_agg.csv',
zone_rates_comps=results_path / 'zone_rates_comps_summary_table.csv',
zone_rates_remap=results_path / 'zone_rates_remap_summary_table.csv',
bal_conds_seg_rates=results_path / 'bal_conds_seg_rates_summary_table.csv',
)
paths['results'] = results_path
paths['figures'] = figures_path
return paths
def update_paths(self):
for subject in self.subjects:
_ = SubjectInfo(subject, overwrite=True)
def get_zone_rates_comps(self, overwrite=False):
"""
Aggregates tables across sessions and adds unit information.
Note that overwrite only overwrites the aggregate table and does not re-run the analysis on each session.
:param overwrite:
:return:
pandas data frame with n_units as index
"""
if not self.paths['zone_rates_comps'].exists() or overwrite:
sessions_validity = self.get_track_validity_table()
zone_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.zone_rates_comps == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_zone_rates_remapping()
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
zone_rates = zone_rates.append(session_table)
zone_rates = zone_rates.reset_index(drop=True)
zone_rates.to_csv(self.paths['zone_rates_comps'])
else:
zone_rates = pd.read_csv(self.paths['zone_rates_comps'], index_col=0)
return zone_rates
def get_bal_conds_seg_rates(self, segment_type='bigseg', overwrite=False):
fn = self.paths['bal_conds_seg_rates']
if segment_type != 'bigseg':
name = fn.name.split('.')
name2 = f"{name[0]}_{segment_type}.{name[1]}"  # insert the segment type before the file extension
fn = fn.parent / name2
if not fn.exists() or overwrite:
sessions_validity = self.get_track_validity_table()
seg_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.bal_conds_seg_rates == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_bal_conds_seg_rates(segment_type=segment_type)
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
seg_rates = seg_rates.append(session_table)
seg_rates = seg_rates.reset_index(drop=True)
seg_rates.to_csv(fn)
else:
seg_rates = pd.read_csv(fn, index_col=0)
return seg_rates
def get_zone_rates_remap(self, overwrite=False):
if not self.paths['zone_rates_remap'].exists() or overwrite:
sessions_validity = self.get_track_validity_table()
zone_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.zone_rates_comps == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_zone_rates_remap()
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
zone_rates = zone_rates.append(session_table)
zone_rates = zone_rates.reset_index(drop=True)
zone_rates.to_csv(self.paths['zone_rates_remap'])
else:
zone_rates = pd.read_csv(self.paths['zone_rates_remap'], index_col=0)
return zone_rates
def get_of_results(self, overwrite=False):
curate_flag = False
# get metrics
if not self.paths['of_metric_scores'].exists() or overwrite:
metric_scores = self._get_of_metric_scores()
curate_flag = True
else:
metric_scores = pd.read_csv(self.paths['of_metric_scores'], index_col=0)
# get models
if not self.paths['of_model_scores'].exists() or overwrite:
model_scores = self._get_of_models_scores()
curate_flag = True
else:
model_scores = pd.read_csv(self.paths['of_model_scores'], index_col=0)
if curate_flag:
metric_scores, model_scores = self._match_unit_ids(metric_scores, model_scores)
for session in self.invalid_sessions:
unit_idx = self.unit_table[self.unit_table.session == session].unique_cl_name
metric_scores.loc[metric_scores.cl_name.isin(unit_idx), 'session_valid'] = False
model_scores.loc[model_scores.cl_name.isin(unit_idx), 'session_valid'] = False
metric_scores.to_csv(self.paths['of_metric_scores'])
model_scores.to_csv(self.paths['of_model_scores'])
return metric_scores, model_scores
def _get_of_metric_scores(self, overwrite=False):
if not self.paths['of_metric_scores'].exists() or overwrite:
analyses = ['speed', 'hd', 'border', 'grid', 'stability']
output_scores_names = ['score', 'sig']
n_analyses = len(analyses)
unit_count = 0
metric_scores = pd.DataFrame()
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if 'OF' in session:
session_info = SubjectSessionInfo(subject, session)
if session_info.n_units > 0:
temp = session_info.get_scores()
session_scores = pd.DataFrame(index=np.arange(session_info.n_units * n_analyses),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name', 'analysis_type',
'score', 'sig', ])
session_scores['analysis_type'] = np.repeat(np.array(analyses), session_info.n_units)
session_scores['session'] = session_info.session
session_scores['subject'] = session_info.subject
session_scores['session_unit_id'] = np.tile(np.arange(session_info.n_units), n_analyses)
session_scores['unit_id'] = np.tile(np.arange(session_info.n_units),
n_analyses) + unit_count
session_scores['unit_type'] = [v[0] for k, v in
session_info.cluster_ids.items()] * n_analyses
session_scores['tt'] = [v[1] for k, v in session_info.cluster_ids.items()] * n_analyses
session_scores['tt_cl'] = [v[2] for k, v in
session_info.cluster_ids.items()] * n_analyses
behav = session_info.get_track_data()
# noinspection PyTypeChecker
coverage = np.around(behav['pos_valid_mask'].mean(), 2)
session_scores['session_pct_cov'] = coverage
session_scores['session_valid'] = coverage >= self.min_pct_coverage
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_scores['cl_name'] = cl_names * n_analyses
unit_count += session_info.n_units
try:
for ii, analysis in enumerate(analyses):
indices = np.arange(session_info.n_units) + ii * session_info.n_units
session_scores.at[indices, 'sig'] = temp[analysis + '_sig'].values
if analysis == 'stability':
session_scores.at[indices, 'score'] = temp[analysis + '_corr'].values
else:
session_scores.at[indices, 'score'] = temp[analysis + '_score'].values
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
pass
session_scores[output_scores_names] = session_scores[output_scores_names].astype(float)
metric_scores = metric_scores.append(session_scores)
metric_scores = metric_scores.reset_index(drop=True)
metric_scores.to_csv(self.paths['of_metric_scores'])
else:
metric_scores = pd.read_csv(self.paths['of_metric_scores'], index_col=0)
return metric_scores
def _get_of_models_scores(self):
models = ['speed', 'hd', 'border', 'grid', 'pos', 'agg_all', 'agg_sdp', 'agg_sdbg']
metrics = ['r2', 'map_r', 'n_err', 'coef', 'agg_all_coef', 'agg_sdbg_coef', 'agg_sdp_coef']
splits = ['train', 'test']
unit_count = 0
model_scores = pd.DataFrame()
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if 'OF' in session:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
temp = session_info.get_encoding_models_scores()
if temp.empty:
continue
# noinspection PyTypeChecker
mask = (temp['metric'].isin(metrics)) & (temp['model'].isin(models))
session_models_scores = pd.DataFrame(index=range(mask.sum()),
columns=['unit_id', 'subject', 'session',
'session_unit_id',
'unit_type', 'session_pct_cov',
'session_valid',
'tt', 'tt_cl', 'model', 'split', 'metric',
'value'])
session_models_scores.loc[:, ['model', 'split', 'metric', 'value']] = \
temp.loc[mask, ['model', 'split', 'metric', 'value']].values
session_models_scores['session'] = session_info.session
session_models_scores['subject'] = session_info.subject
session_models_scores['session_unit_id'] = temp.loc[mask, 'unit_id'].values
session_models_scores['unit_id'] = session_models_scores['session_unit_id'] + unit_count
for session_unit_id, cluster_info in session_info.cluster_ids.items():
mask = session_models_scores.session_unit_id == int(session_unit_id)
tt = cluster_info[1]
cl = cluster_info[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
session_models_scores.loc[mask, 'unit_type'] = cluster_info[0]
session_models_scores.loc[mask, 'tt'] = tt
session_models_scores.loc[mask, 'tt_cl'] = cl
session_models_scores.loc[mask, 'cl_name'] = cl_name
behav = session_info.get_track_data()
# noinspection PyTypeChecker
coverage = np.around(behav['pos_valid_mask'].mean(), 2)
session_models_scores['session_pct_cov'] = coverage
session_models_scores['session_valid'] = coverage >= self.min_pct_coverage
#
model_scores = model_scores.append(session_models_scores)
unit_count += n_session_units
except ValueError:
traceback.print_exc(file=sys.stdout)
pass
#
model_scores = model_scores.reset_index(drop=True)
model_scores = model_scores.astype({"value": float})
model_scores.to_csv(self.paths['of_model_scores'])
return model_scores
def _match_unit_ids(self, metric_scores, model_scores):
session_unit_id_array = metric_scores[['session', 'session_unit_id']].values
session_unit_id_tuple = [tuple(ii) for ii in session_unit_id_array]
sid_2_uid = {}
uid_2_sid = {}
used_ids = []
unique_id_cnt = 0
for suid in session_unit_id_tuple:
if not suid in used_ids:
sid_2_uid[suid] = unique_id_cnt
uid_2_sid[unique_id_cnt] = suid
unique_id_cnt += 1
used_ids += [suid]
session_unit_id_array = model_scores[['session', 'session_unit_id']].values
session_unit_id_tuple = [tuple(ii) for ii in session_unit_id_array]
model_scores['unit_id'] = [sid_2_uid[suid] for suid in session_unit_id_tuple]
metric_scores.to_csv(self.paths['of_metric_scores'])
model_scores.to_csv(self.paths['of_model_scores'])
return metric_scores, model_scores
def get_unit_table(self, overwrite=False):
if not self.paths['units'].exists() or overwrite:
raise NotImplementedError
else:
unit_table = pd.read_csv(self.paths['units'], index_col=0)
return unit_table
def plot(self, fig_id, save=False, dpi=1000, root_dir=None, fig_format='jpg'):
if fig_id == 'f1':
f1 = pf.Fig1()
f = f1.plot_all()
else:
return
if save:
fn = f"{fig_id}.{fig_format}"
if root_dir is None:
f.savefig(self.paths['figures'] / fn, dpi=dpi, bbox_inches='tight')
else:
if root_dir in self._root_paths.keys():
paths = self._get_paths(self._root_paths[root_dir])
f.savefig(paths['figures'] / fn, dpi=dpi, bbox_inches='tight')
return f
class SubjectInfo:
def __init__(self, subject, sorter='KS2', data_root='BigPC', overwrite=False, time_step=0.02,
samp_rate=32000, n_tetrodes=16, fr_temporal_smoothing=0.125, spk_outlier_thr=None,
overwrite_cluster_stats=False, overwrite_session_clusters=False):
subject = str(subject.title())
self.subject = subject
self.sorter = sorter
self.params = {'time_step': time_step, 'samp_rate': samp_rate, 'n_tetrodes': n_tetrodes,
'fr_temporal_smoothing': fr_temporal_smoothing, 'spk_outlier_thr': spk_outlier_thr,
'spk_recording_buffer': 3}
self.tetrodes = np.arange(n_tetrodes, dtype=int) + 1
if data_root == 'BigPC':
if subject in ['Li', 'Ne']:
self.root_path = Path('/mnt/Data1_SSD2T/Data')
elif subject in ['Cl']:
self.root_path = Path('/mnt/Data2_SSD2T/Data')
elif subject in ['Ca', 'Mi', 'Al']:
self.root_path = Path('/mnt/Data3_SSD2T/Data')
self.raw_path = Path('/mnt/Raw_Data/Data', subject)
elif data_root == 'oak':
self.root_path = Path('/mnt/o/giocomo/alexg/')
self.raw_path = self.root_path / 'RawData/InVivo' / subject
# self.sorted_path = self.root_path / 'Clustered' / subject
# self.results_path = self.root_path / 'Analyses' / subject
else:
self.root_path = Path(data_root)
self.raw_path = self.root_path / 'Raw_Data' / subject
self.preprocessed_path = self.root_path / 'PreProcessed' / subject
self.sorted_path = self.root_path / 'Sorted' / subject
self.results_path = self.root_path / 'Results' / subject
self.subject_info_file = self.results_path / ('subject_info_{}_{}.pkl'.format(sorter, subject))
# check if a saved instance for this subject and sorter already exists
if self.subject_info_file.exists() and not overwrite:
self.load_subject_info()
else:
# get channel table
self._channel_table_file = self.preprocessed_path / ('chan_table_{}.csv'.format(subject))
if not self._channel_table_file.exists():
_task_fn = self.preprocessed_path / 'TasksDir' / f"pp_table_{self.subject}.json"
if _task_fn.exists():
with _task_fn.open(mode='r') as f:
_task_table = json.load(f)
pp_funcs.post_process_channel_table(self.subject, _task_table)
else:
sys.exit(f"Error. Task table for pre-processing does not exists: {_task_fn}")
self.channel_table = pd.read_csv(self._channel_table_file, index_col=0)
# get sessions from channel table information
self.sessions = list(self.channel_table.index)
self.n_sessions = len(self.sessions)
self.session_paths = {}
for session in self.sessions:
self.session_paths[session] = self._session_paths(session)
# get cluster information
try:
if overwrite_cluster_stats:
# overwrite cluster stats & clusters tables
self.update_clusters()
else:
# load tables
self.session_clusters = self.get_session_clusters(overwrite=overwrite_session_clusters)
self.sort_tables = self.get_sort_tables(overwrite=overwrite_session_clusters)
except:
print("Error obtaining clusters.")
print(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
traceback.print_exc(file=sys.stdout)
# get tetrode depths & match sessions
self.sessions_tt_positions = self.get_sessions_tt_position()
self.tt_depth_match = self.get_tetrode_depth_match()
# hack: get_sessions_analyses calls SubjectSessionInfo (which loads the subject info) before it is saved,
# so we need to save it first. An alternative is to feed SubjectSessionInfo the subject_info object instead.
# TODO #
self.save_subject_info()
# check analyses table
self.analyses_table = self.get_sessions_analyses()
self.valid_track_table = self.check_track_data_validty()
self.save_subject_info()
def load_subject_info(self):
with self.subject_info_file.open(mode='rb') as f:
loaded_self = pickle.load(f)
self.__dict__.update(loaded_self.__dict__)
return self
def save_subject_info(self):
with self.subject_info_file.open(mode='wb') as f:
pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
def get_sessions_analyses(self):
analyses_table = pd.DataFrame()
for session in self.sessions:
session_info = SubjectSessionInfo(self.subject, session)
analyses_table = analyses_table.append(session_info.session_analyses_table)
analyses_table.fillna(-1, inplace=True)
return analyses_table
def check_track_data_validty(self):
df = pd.DataFrame(index=self.sessions, columns=['task', 'validity'])
for session in self.sessions:
if self.analyses_table.loc[session, 'track_data'] == 1:
session_info = SubjectSessionInfo(self.subject, session)
df.loc[session, 'task'] = session_info.task
df.loc[session, 'validity'] = session_info.check_track_data_validity()
return df
# tetrode methods
def update_clusters(self):
self.session_clusters = self.get_session_clusters(overwrite=True)
self.sort_tables = self.get_sort_tables(overwrite=True)
self.save_subject_info()
def get_sessions_tt_position(self):
p = Path(self.results_path / f"{self.subject}_tetrodes.csv")
if p.exists():
tt_pos = pd.read_csv(p)
tt_pos['Date'] = pd.to_datetime(tt_pos['Date']).dt.strftime('%m%d%y')
tt_pos = tt_pos.set_index('Date')
tt_pos = tt_pos[['TT' + str(tt) + '_overall' for tt in self.tetrodes]]
session_dates = {session: session.split('_')[2] for session in self.sessions}
sessions_tt_pos = pd.DataFrame(index=self.sessions, columns=['tt_' + str(tt) for tt in self.tetrodes])
tt_pos_dates = tt_pos.index
prev_date = tt_pos_dates[0]
for session in self.sessions:
date = session_dates[session]
# the if below corrects for incorrect session dates for Cl
if (date in ['010218', '010318', '010418']) & (self.subject == 'Cl'):
date = date[:5] + '9'
# this part accounts for missing dates by assigning them to the previous update
if date in tt_pos_dates:
sessions_tt_pos.loc[session] = tt_pos.loc[date].values
prev_date = str(date)
else:
sessions_tt_pos.loc[session] = tt_pos.loc[prev_date].values
return sessions_tt_pos
else:
print(f"Tetrode depth table not found at '{str(p)}'")
return None
def get_depth_wf(self):
raise NotImplementedError
def get_session_tt_wf(self, session, tt, cluster_ids=None, wf_lims=None, n_wf=200):
import Sorting.sort_functions as sort_funcs
if wf_lims is None:
wf_lims = [-12, 20]
tt_str = 'tt_' + str(tt)
_sort_path = Path(self.session_paths[session]['Sorted'], tt_str, self.sorter)
_cluster_spike_time_fn = _sort_path / 'spike_times.npy'
_cluster_spike_ids_fn = _sort_path / 'spike_clusters.npy'
_hp_data_fn = _sort_path / 'recording.dat'
if _hp_data_fn.exists():
hp_data = sort_funcs.load_hp_binary_data(_hp_data_fn)
else: # filter data
hp_data = self._spk_filter_data(session, tt)
spike_times = np.load(_cluster_spike_time_fn)
spike_ids = np.load(_cluster_spike_ids_fn)
wf_samps = np.arange(wf_lims[0], wf_lims[1])
if cluster_ids is None:
cluster_ids = np.unique(spike_ids)
n_clusters = len(cluster_ids)
out = np.zeros((n_clusters, n_wf, len(wf_samps) * 4), dtype=np.float16)
for cl_idx, cluster in enumerate(cluster_ids):
cl_spk_times = spike_times[spike_ids == cluster]
n_cl_spks = len(cl_spk_times)
if n_wf == 'all':
sampled_spikes = cl_spk_times
elif n_wf > n_cl_spks:
# Note that if the number of spikes < n_wf, spikes will be repeated so that sampled_spikes has n_wf waveforms
sampled_spikes = cl_spk_times[np.random.randint(n_cl_spks, size=n_wf)]
else: # sample from spikes
sampled_spikes = cl_spk_times[np.random.choice(np.arange(n_cl_spks), size=n_wf, replace=False)]
for wf_idx, samp_spk in enumerate(sampled_spikes):
out[cl_idx, wf_idx] = hp_data[:, wf_samps + samp_spk].flatten()
return out
def get_session_clusters(self, overwrite=False):
_clusters_file = self.sorted_path / ('clusters_{}_{}.json'.format(self.sorter, self.subject))
if _clusters_file.exists() and not overwrite: # load
with _clusters_file.open(mode='r') as f:
session_clusters = json.load(f)
else: # create
session_clusters = {}
for session in self.sessions:
self._cluster_stats(session)
session_clusters[session] = self._session_clusters(session)
try:
with _clusters_file.open(mode='w') as f:
json.dump(session_clusters, f, indent=4)
except TypeError:
print(session)
return session_clusters
# cluster matching methods
def get_tetrode_depth_match(self):
tt_pos = self.sessions_tt_positions
try:
tt_depth_matchs = {tt: {} for tt in self.tetrodes}
for tt in self.tetrodes:
tt_str = 'tt_' + str(tt)
tt_depths = tt_pos[tt_str].unique()
for depth in tt_depths:
tt_depth_matchs[tt][depth] = list(tt_pos[tt_pos[tt_str] == depth].index)
return tt_depth_matchs
except:
print("Error Matching Sessions based on tetrode depth")
print(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
traceback.print_exc(file=sys.stdout)
return None
def get_session_match_analysis(self):
# # determine sessions/tt to match based on depth
# matching_analyses = []
# for tt in np.arange(1, 17):
# tt_depths = list(self.tt_depth_match[tt].keys())
#
# for tt_d in tt_depths:
# tt_d_sessions = self.tt_depth_match[tt][tt_d]
# # check if there are 2 or more sessions with units
# n_cells_session = np.zeros(len(tt_d_sessions), dtype=int)
#
# for ii, session in enumerate(tt_d_sessions):
# session_cell_ids = self.session_clusters[session]['cell_IDs']
# if tt in session_cell_ids.keys():
# n_cells_session[ii] = len(session_cell_ids[tt])
# sessions_with_cells = np.where(n_cells_session > 0)[0]
#
# if len(sessions_with_cells) >= 2:
# n_units = n_cells_session[sessions_with_cells].sum()
# matching_analyses.append((tt, tt_d, np.array(tt_d_sessions)[sessions_with_cells].tolist(),
# n_units, n_cells_session[sessions_with_cells].tolist()))
## version as a dict ##
matching_analyses = {}
cnt = 0
for tt in np.arange(1, 17):
tt_depths = list(self.tt_depth_match[tt].keys())
for tt_d in tt_depths:
tt_d_sessions = self.tt_depth_match[tt][tt_d]
# check if there are 2 or more sessions with units
n_cells_session = np.zeros(len(tt_d_sessions), dtype=int)
for ii, session in enumerate(tt_d_sessions):
session_cell_ids = self.session_clusters[session]['cell_IDs']
if tt in session_cell_ids.keys():
n_cells_session[ii] = len(session_cell_ids[tt])
sessions_with_cells = np.where(n_cells_session > 0)[0]
n_units = n_cells_session[sessions_with_cells].sum()
if len(sessions_with_cells) >= 1:
matching_analyses[cnt] = {'tt': tt, 'd': tt_d, 'n_units': n_units,
'sessions': np.array(tt_d_sessions)[sessions_with_cells].tolist(),
'n_session_units': n_cells_session[sessions_with_cells].tolist()}
cnt += 1
return matching_analyses
def get_cluster_dists(self, overwrite=False, **kwargs):
import Analyses.cluster_match_functions as cmf
params = {'dim_reduc_method': 'umap', 'n_wf': 1000, 'zscore_wf': True}
params.update(kwargs)
cl_dists_fn = self.results_path / f"cluster_dists.pickle"
if not cl_dists_fn.exists() or overwrite:
matching_analyses = self.get_session_match_analysis()
n_wf = params['n_wf']
dim_reduc_method = params['dim_reduc_method']
n_samps = 32 * 4
cluster_dists = {k: {} for k in np.arange(len(matching_analyses))}
for analysis_id, analysis in matching_analyses.items():
tt, d, sessions = analysis['tt'], analysis['d'], analysis['sessions']
n_units, n_session_units = analysis['n_units'], analysis['n_session_units']
# Obtain cluster labels & mapping between labels [this part can be improved]
cl_names = []
for session_num, session in enumerate(sessions):
cluster_ids = self.session_clusters[session]['cell_IDs'][tt]
for cl_num, cl_id in enumerate(cluster_ids):
cl_name = f"{session}-tt{tt}_d{d}_cl{cl_id}"
cl_names.append(cl_name)
# load waveforms
X = np.empty((0, n_wf, n_samps), dtype=np.float16)
for session in sessions:
cluster_ids = self.session_clusters[session]['cell_IDs'][tt]
session_cell_wf = self.get_session_tt_wf(session, tt, cluster_ids=cluster_ids, n_wf=n_wf)
X = np.concatenate((X, session_cell_wf), axis=0)
if params['zscore_wf']:
X = robust_zscore(X, axis=2)
X[np.isnan(X)] = 0
X[np.isinf(X)] = 0
# Obtain numeric cluster labels (one per sampled waveform)
clusters_label_num = np.arange(n_units).repeat(n_wf)
# Reduce dims
X_2d = cmf.dim_reduction(X.reshape(-1, X.shape[-1]), method=dim_reduc_method)
# compute covariance and location
clusters_loc, clusters_cov = cmf.get_clusters_moments(data=X_2d, labels=clusters_label_num)
# compute distance metrics
dist_mats = cmf.get_clusters_all_dists(clusters_loc, clusters_cov, data=X_2d, labels=clusters_label_num)
# create data frames with labeled cluster names
dists_mats_df = {}
for metric, dist_mat in dist_mats.items():
dists_mats_df[metric] = pd.DataFrame(dist_mat, index=cl_names, columns=cl_names)
# store
clusters_loc = {k: v for k, v in zip(cl_names, clusters_loc)}
clusters_cov = {k: v for k, v in zip(cl_names, clusters_cov)}
cluster_dists[analysis_id] = {'analysis': analysis, 'cl_names': cl_names,
'clusters_loc': clusters_loc, 'clusters_cov': clusters_cov,
'dists_mats': dists_mats_df}
print(".", end="")
with cl_dists_fn.open(mode='wb') as f:
pickle.dump(cluster_dists, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with cl_dists_fn.open(mode='rb') as f:
cluster_dists = pickle.load(f)
return cluster_dists
def match_clusters(self, overwrite=False, require_subsets=True, **kwargs):
import Analyses.cluster_match_functions as cmf
params = {'dist_metric': 'pe', 'dist_metric_thr': 0.5, 'select_lower': True}
params.update(kwargs)
dist_metric = params['dist_metric']
dist_metric_thr = params['dist_metric_thr']
select_lower = params['select_lower']
if require_subsets: # rs -> require subsets, conservative in grouping clusters
cl_match_results_fn = self.results_path / f"cluster_matches_rs_{params['dist_metric']}.pickle"
else: # nrs -> doesn't require subsets, results in more sessions being grouped
cl_match_results_fn = self.results_path / f"cluster_matches_nrs_{params['dist_metric']}.pickle"
if not cl_match_results_fn.exists() or overwrite:
cluster_dists = self.get_cluster_dists()
matching_analyses = self.get_session_match_analysis()
# [cluster_dists[k]['analysis'] for k in cluster_dists.keys()]
cluster_match_results = {k: {} for k in np.arange(len(matching_analyses))}
for analysis_id, analysis in matching_analyses.items():
dist_mat = cluster_dists[analysis_id]['dists_mats'][dist_metric]
matches_dict = cmf.find_session_cl_matches(dist_mat, thr=dist_metric_thr,
session_cl_sep="-", select_lower=select_lower)
unique_matches_sets, unique_matches_dict = \
cmf.matches_dict_to_unique_sets(matches_dict, dist_mat, select_lower=select_lower,
require_subsets=require_subsets)
cluster_match_results[analysis_id] = {'analysis': analysis,
'matches_dict': unique_matches_dict,
'matches_sets': unique_matches_sets
}
with cl_match_results_fn.open(mode='wb') as f:
pickle.dump(cluster_match_results, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with cl_match_results_fn.open(mode='rb') as f:
cluster_match_results = pickle.load(f)
return cluster_match_results
# sort/unit tables methods
def get_sort_tables(self, overwrite=False):
_sort_table_ids = ['tt', 'valid', 'curated', 'summary']
_sort_table_files = {ii: Path(self.sorted_path, 'sort_{}_{}_{}'.format(ii, self.sorter, self.subject))
for ii in _sort_table_ids}
if _sort_table_files['summary'].exists() and not overwrite:
sort_tables = {ii: [] for ii in _sort_table_ids}
for ii in _sort_table_ids:
sort_tables[ii] = pd.read_csv(_sort_table_files[ii], index_col=0)
else:
sort_tables = self._sort_tables()
for ii in _sort_table_ids:
sort_tables[ii].to_csv(_sort_table_files[ii])
return sort_tables
def get_units_table(self, overwrite=False):
units_table_fn = self.results_path / f"units_table.csv"
if not units_table_fn.exists() or overwrite:
n_total_units = 0
for session in self.sessions:
n_total_units += self.session_clusters[session]['n_cell']
n_total_units += self.session_clusters[session]['n_mua']
subject_units_table = pd.DataFrame(index=np.arange(n_total_units),
columns=["subject_cl_id", "subject", "session", "task", "date",
"subsession", "tt", "depth", "unique_cl_name",
"session_cl_id", "unit_type", "n_matches_con",
"subject_cl_match_con_id", "n_matches_lib",
"subject_cl_match_lib_id",
"snr", "fr", "isi_viol_rate"])
subject_units_table["subject"] = self.subject
subject_cl_matches_con = self.match_clusters()
matches_con_sets = {}
matches_con_set_num = {}
matches_con_dict = {}
cnt = 0
for k, cma in subject_cl_matches_con.items():
matches_con_sets.update({cnt + ii: clx_set for ii, clx_set in enumerate(cma['matches_sets'])})
cnt = len(matches_con_sets)
matches_con_dict.update(cma['matches_dict'])
# create a dict mapping each cluster name to its match-set number
for set_num, clm_set in matches_con_sets.items():
for cl in clm_set:
matches_con_set_num[cl] = set_num
subject_cl_matches_lib = self.match_clusters(require_subsets=False)
matches_lib_sets = {}
matches_lib_set_num = {}
matches_lib_dict = {}
cnt = 0
for k, cma in subject_cl_matches_lib.items():
matches_lib_sets.update({cnt + ii: clx_set for ii, clx_set in enumerate(cma['matches_sets'])})
cnt = len(matches_lib_sets)
matches_lib_dict.update(cma['matches_dict'])
for set_num, clm_set in matches_lib_sets.items():
for cl in clm_set:
matches_lib_set_num[cl] = set_num
try:
unit_cnt = 0
for session in self.sessions:
session_details = session.split("_")
if len(session_details) > 3:
subsession = session_details[3]
else:
subsession = "0000"
session_clusters = self.session_clusters[session]
n_session_cells = session_clusters['n_cell']
n_session_mua = session_clusters['n_mua']
n_session_units = n_session_cells + n_session_mua
session_unit_idx = np.arange(n_session_units) + unit_cnt
subject_units_table.loc[session_unit_idx, "subject_cl_id"] = session_unit_idx
subject_units_table.loc[session_unit_idx, "session"] = session
subject_units_table.loc[session_unit_idx, "task"] = session_details[1]
subject_units_table.loc[session_unit_idx, "date"] = session_details[2]
subject_units_table.loc[session_unit_idx, "subsession"] = subsession
for unit_type in ['cell', 'mua']:
for tt, tt_clusters in session_clusters[f'{unit_type}_IDs'].items():
if len(tt_clusters) > 0:
depth = self.sessions_tt_positions.loc[session, f"tt_{tt}"]
for cl in tt_clusters:
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
subject_units_table.loc[unit_cnt, "subject_cl_id"] = unit_cnt
subject_units_table.loc[unit_cnt, "unique_cl_name"] = cl_name
subject_units_table.loc[unit_cnt, "tt"] = tt
subject_units_table.loc[unit_cnt, "depth"] = depth
subject_units_table.loc[unit_cnt, "unit_type"] = unit_type
subject_units_table.loc[unit_cnt, "session_cl_id"] = cl
subject_units_table.loc[unit_cnt, "snr"] = session_clusters["clusters_snr"][tt][cl]
subject_units_table.loc[unit_cnt, "fr"] = session_clusters["clusters_fr"][tt][cl]
subject_units_table.loc[unit_cnt, "isi_viol_rate"] = \
session_clusters["clusters_isi_viol_rate"][tt][cl]
if unit_type == 'cell':
# add fields of conservative cluster matching (requires subset)
if cl_name in matches_con_dict.keys():
cl_matches = matches_con_dict[cl_name][0]
subject_units_table.loc[unit_cnt, "n_matches_con"] = len(cl_matches)
subject_units_table.loc[unit_cnt, "subject_cl_match_con_id"] = \
matches_con_set_num[cl_name]
# add fields of liberal cluster matching ( does not require subset matching)
if cl_name in matches_lib_dict.keys():
cl_matches = matches_lib_dict[cl_name][0]
subject_units_table.loc[unit_cnt, "n_matches_lib"] = len(cl_matches)
subject_units_table.loc[unit_cnt, "subject_cl_match_lib_id"] = \
matches_lib_set_num[cl_name]
unit_cnt += 1
except:
print(session, tt, cl)
traceback.print_exc(file=sys.stdout)
subject_units_table.to_csv(units_table_fn)
else:
subject_units_table = pd.read_csv(units_table_fn, index_col=0)
return subject_units_table
# private methods
def _spk_filter_data(self, session, tt):
tt_str = 'tt_' + str(tt)
sos, _ = pp_funcs.get_sos_filter_bank(['Sp'], fs=self.params['samp_rate'])
sig = np.load(self.session_paths[session]['PreProcessed'] / (tt_str + '.npy'))
hp_data = np.zeros_like(sig)
for ch in range(4):
hp_data[ch] = signal.sosfiltfilt(sos, sig[ch])
return hp_data
def _session_paths(self, session):
time_step = self.params['time_step']
samp_rate = self.params['samp_rate']
tmp = session.split('_')
subject = tmp[0]
task = tmp[1]
date = tmp[2]
paths = {'session': session, 'subject': subject, 'task': task, 'date': date, 'step': time_step, 'SR': samp_rate,
'Sorted': self.sorted_path / session, 'Raw': self.raw_path / session,
'PreProcessed': self.preprocessed_path / session, 'Results': self.results_path / session}
paths['Results'].mkdir(parents=True, exist_ok=True)
paths['behav_track_data'] = paths['Results'] / ('behav_track_data{}ms.pkl'.format(int(time_step * 1000)))
# these paths are mostly legacy
paths['Spike_IDs'] = paths['Results'] / 'Spike_IDs.json'
for ut in ['Cell', 'Mua']:
paths[ut + '_wf_info'] = paths['Results'] / (ut + '_wf_info.pkl')
paths[ut + '_Spikes'] = paths['Results'] / (ut + '_Spikes.json')
paths[ut + '_WaveForms'] = paths['Results'] / (ut + '_WaveForms.pkl')
paths[ut + '_Bin_Spikes'] = paths['Results'] / ('{}_Bin_Spikes_{}ms.npy'.format(ut, int(time_step * 1000)))
paths[ut + '_FR'] = paths['Results'] / ('{}_FR_{}ms.npy'.format(ut, int(time_step * 1000)))
paths['cluster_spikes'] = paths['Results'] / 'spikes.npy'
paths['cluster_spikes_ids'] = paths['Results'] / 'spikes_ids.json'
paths['cluster_wf_info'] = paths['Results'] / 'wf_info.pkl'
paths['cluster_binned_spikes'] = paths['Results'] / f'binned_spikes_{int(time_step * 1000)}ms.npy'
paths['cluster_fr'] = paths['Results'] / 'fr.npy'
paths['cluster_spike_maps'] = paths['Results'] / 'spike_maps.npy'
paths['cluster_fr_maps'] = paths['Results'] / 'maps.npy'
if task == 'OF':
paths['cluster_OF_metrics'] = paths['Results'] / 'OF_metrics.csv'
paths['cluster_OF_encoding_models'] = paths['Results'] / 'OF_encoding.csv'
paths['cluster_OF_encoding_agg_coefs'] = paths['Results'] / 'OF_encoding_agg_coefs.csv'
else:
paths['trial_table'] = paths['Results'] / 'trial_table.csv'
paths['event_table'] = paths['Results'] / 'event_table.csv'
paths['track_table'] = paths['Results'] / 'track_table.csv'
paths['event_time_series'] = paths['Results'] / 'event_time_series.csv'
paths['not_valid_pos_samps'] = paths['Results'] / 'not_valid_pos_samps.npy'
paths['pos_zones'] = paths['Results'] / 'pos_zones.npy'
paths['pos_zones_invalid_samps'] = paths['Results'] / 'pos_zones_invalid_samps.npy'
paths['trial_zone_rates'] = paths['Results'] / 'trial_zone_rates.npy'
paths['zone_rates_comps'] = paths['Results'] / 'zone_rates_comps.csv'
paths['zone_rates_remap'] = paths['Results'] / 'zone_rates_remap.csv'
paths['bal_conds_seg_rates'] = paths['Results'] / 'bal_conds_seg_rates.csv'
paths['zone_analyses'] = paths['Results'] / 'ZoneAnalyses.pkl'
paths['TrialInfo'] = paths['Results'] / 'TrInfo.pkl'
paths['TrialCondMat'] = paths['Results'] / 'TrialCondMat.csv'
paths['TrLongPosMat'] = paths['Results'] / 'TrLongPosMat.csv'
paths['TrLongPosFRDat'] = paths['Results'] / 'TrLongPosFRDat.csv'
paths['TrModelFits2'] = paths['Results'] / 'TrModelFits2.csv'
paths['CueDesc_SegUniRes'] = paths['Results'] / 'CueDesc_SegUniRes.csv'
paths['CueDesc_SegDecRes'] = paths['Results'] / 'CueDesc_SegDecRes.csv'
paths['CueDesc_SegDecSumRes'] = paths['Results'] / 'CueDesc_SegDecSumRes.csv'
paths['PopCueDesc_SegDecSumRes'] = paths['Results'] / 'PopCueDesc_SegDecSumRes.csv'
# plots directories
# paths['Plots'] = paths['Results'] / 'Plots'
# # paths['Plots'].mkdir(parents=True, exist_ok=True)
# paths['SampCountsPlots'] = paths['Plots'] / 'SampCountsPlots'
# # paths['SampCountsPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['ZoneFRPlots'] = paths['Plots'] / 'ZoneFRPlots'
# # paths['ZoneFRPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['ZoneCorrPlots'] = paths['Plots'] / 'ZoneCorrPlots'
# # paths['ZoneCorrPlots'].mkdir(parents=True, exist_ok=True)
# paths['SIPlots'] = paths['Plots'] / 'SIPlots'
# # paths['SIPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['TrialPlots'] = paths['Plots'] / 'TrialPlots'
# # paths['TrialPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['CueDescPlots'] = paths['Plots'] / 'CueDescPlots'
# # paths['CueDescPlots'].mkdir(parents=True, exist_ok=True)
return paths
def _cluster_stats(self, session):
import Sorting.sort_functions as sort_funcs
sort_path = self.session_paths[session]['Sorted']
for tt in self.tetrodes:
tt_str = 'tt_' + str(tt)
_cluster_spike_time_fn = Path(sort_path, tt_str, self.sorter, 'spike_times.npy')
_cluster_spike_ids_fn = Path(sort_path, tt_str, self.sorter, 'spike_clusters.npy')
_cluster_groups_fn = Path(sort_path, ('tt_' + str(tt)), self.sorter, 'cluster_group.tsv')
_cluster_stats_fn = Path(sort_path, ('tt_' + str(tt)), self.sorter, 'cluster_stats.csv')
_hp_data_fn = Path(sort_path, tt_str, self.sorter, 'recording.dat')
_hp_data_info_fn = Path(sort_path, tt_str, tt_str + '_info.pickle')
_cluster_stats_fn2 = Path(sort_path, tt_str, self.sorter, 'cluster_stats_curated.csv')
try:
# load
cluster_groups = pd.read_csv(_cluster_groups_fn, sep='\t')
try:
cluster_stats = pd.read_csv(_cluster_stats_fn2, index_col=0)
except:
cluster_stats = pd.DataFrame(columns=['cl_num'])
# get units and units already with computed stats
valid_units = cluster_groups.cluster_id.values
unit_keys_with_stats = cluster_stats.index.values
units_with_stats = cluster_stats.cl_num.values
unit_overlap = np.intersect1d(valid_units, units_with_stats)
missing_units = np.setdiff1d(valid_units, units_with_stats)
# get stats for overlapping units
cluster_stats2 = cluster_stats.loc[cluster_stats.cl_num.isin(valid_units)].copy()
if len(missing_units) > 0:
spike_times = np.load(_cluster_spike_time_fn)
spike_ids = np.load(_cluster_spike_ids_fn)
spike_times_dict = {unit: spike_times[spike_ids == unit].flatten() for unit in missing_units}
# print(spike_times_dict[0])
if _hp_data_fn.exists():
hp_data = sort_funcs.load_hp_binary_data(_hp_data_fn)
else: # filter data
hp_data = self._spk_filter_data(session, tt)
with _hp_data_info_fn.open(mode='rb') as f:
hp_data_info = pickle.load(f)
cluster_stats_missing = sort_funcs.get_cluster_stats(spike_times_dict, hp_data, hp_data_info)
# cluster_stats2 = cluster_stats_missing
cluster_stats2 = cluster_stats2.append(cluster_stats_missing)
cluster_stats2 = cluster_stats2.sort_values('cl_num')
cluster_stats2 = cluster_stats2.drop_duplicates()
# attach curated group labels to the table
cluster_stats2.loc[cluster_stats2.cl_num.isin(valid_units), 'group'] \
= cluster_groups.loc[cluster_groups.cluster_id.isin(valid_units), 'group'].values
cluster_stats2.to_csv(_cluster_stats_fn2)
except FileNotFoundError:
pass
except:
print(f"Error Computing Cluster Stats for {session}")
pass
def _session_clusters(self, session):
table = {'session': session, 'path': str(self.session_paths[session]['Sorted']),
'n_cell': 0, 'n_mua': 0, 'n_noise': 0, 'n_unsorted': 0, 'sorted_TTs': [], 'curated_TTs': [],
'cell_IDs': {}, 'mua_IDs': {}, 'noise_IDs': {}, 'unsorted_IDs': {}, 'clusters_snr': {},
'clusters_fr': {}, 'clusters_valid': {}, 'clusters_isi_viol_rate': {}}
sort_paths = table['path']
_cluster_stats_names = ['fr', 'snr', 'isi_viol_rate', 'valid']
for tt in self.tetrodes:
_cluster_groups_file = Path(sort_paths, ('tt_' + str(tt)), self.sorter, 'cluster_group.tsv')
# check what stats file to load
if Path(sort_paths, ('tt_' + str(tt)), self.sorter, 'cluster_stats_curated.csv').exists():
_cl_stat_file = Path(sort_paths, ('tt_' + str(tt)), self.sorter, 'cluster_stats_curated.csv')
else:
_cl_stat_file = Path(sort_paths, ('tt_' + str(tt)), self.sorter, 'cluster_stats.csv')
if _cl_stat_file.exists():
table['sorted_TTs'].append(int(tt))
d = pd.read_csv(_cl_stat_file, index_col=0)
d = d.astype({'cl_num': int, 'valid': bool})
keys = d.index.values
for st in _cluster_stats_names:
if st == 'valid':
table['clusters_valid'][int(tt)] = {int(d.loc[k, 'cl_num']):
int(d.loc[k, 'valid']) for k in keys}
else:
try:
table['clusters_' + st][int(tt)] = {int(d.loc[k, 'cl_num']):
np.around(d.loc[k, st], 2) for k in keys}
except TypeError:
print(st, keys, tt, session)
sys.exit()
if _cluster_groups_file.exists():
d =
|
pd.read_csv(_cluster_groups_file, delimiter='\t')
|
pandas.read_csv
|
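# A minimal sketch (toy arrays, not the session's real spike_times.npy / spike_clusters.npy)
# of the per-unit grouping used in _cluster_stats above before the stats are computed.
# The file layout is assumed from the code: one entry per spike, with spike_ids[i] giving
# the cluster of the spike recorded at spike_times[i].
import numpy as np

spike_times = np.array([10, 25, 40, 55, 70, 90])
spike_ids = np.array([0, 1, 0, 2, 1, 0])
missing_units = [0, 1, 2]

spike_times_dict = {unit: spike_times[spike_ids == unit].flatten() for unit in missing_units}
print(spike_times_dict)  # {0: array([10, 40, 90]), 1: array([25, 70]), 2: array([55])}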
# -*- coding: UTF-8 -*-
import csv
import pandas as pd
import os
mainlineGames = ["Pokémon Sword", "Pokémon Shield", "Pokémon Ultra Sun", "Pokémon Ultra Moon",
"Pokémon Sun", "Pokémon Moon", "Pokémon X", "Pokémon Y", "Pokémon Omega Ruby", "Pokémon Alpha Sapphire",
"Pokémon Black 2", "Pokémon White 2", "Pokémon Black", "Pokémon White", "Pokémon HeartGold",
"Pokémon SoulSilver", "Pokémon Diamond", "Pokémon Pearl", "Pokémon Platinum", "Pokémon Sword and Shield",
"Pokémon Ultra Sun and Ultra Moon", "Pokémon Omega Ruby and Alpha Sapphire", "Pokémon X and Y", "Pokémon Black 2 and White 2",
"Pokémon Black and White", "Pokémon HeartGold and SoulSilver", "Pokémon Diamond and Pearl"]
forbiddenMethods = ["Trade", "Event", "Poké Transfer", "Pokémon HOME", "Unobtainable", "Pokémon Home",
"event", "Trade, Event", "Poké Transfer, Event", "", "Pokémon Bank", "Global Link Event", "Poké Pelago*"]
data = pd.read_csv("PokemonCatchMethods.csv")
pokemonCatchMethodsPreProcessed = data.values.tolist()
#Remove non-mainline games, trade
pokemonCatchMethods = []
for method in pokemonCatchMethodsPreProcessed:
    # keep rows whose game is a mainline title and whose catch method is not forbidden
    gameFlag = method[0] in mainlineGames
    methodFlag = method[1].rstrip(os.linesep).strip() not in forbiddenMethods
    if gameFlag and methodFlag:
        pokemonCatchMethods.append(method)
        #print(method[2] + "(" +method[1] + "): " +method[0])
pokemonNames = []
pokemonGames = []
catchMethods = []
for method in pokemonCatchMethods:
pokemonNames.append(method[2])
pokemonGames.append(method[0])
catchMethods.append(method[1])
df =
|
pd.DataFrame({"Catch Method": catchMethods, "Pokemon Game":pokemonGames, "Pokemon Name":pokemonNames })
|
pandas.DataFrame
|
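# A hedged, equivalent restatement of the row filter above, done column-wise on a DataFrame
# instead of with explicit loops. The (game, method, name) column order is assumed from the
# code; the rows and column labels here are made up for illustration.
import pandas as pd

toy = pd.DataFrame(
    [["Pokémon Sword", "Route 1", "Wooloo"],
     ["Pokémon GO", "Wild", "Pidgey"],
     ["Pokémon Shield", "Trade", "Scorbunny"]],
    columns=["Game", "Method", "Name"],
)
mainline = {"Pokémon Sword", "Pokémon Shield"}
forbidden = {"Trade", "Event"}
mask = toy["Game"].isin(mainline) & ~toy["Method"].str.strip().isin(forbidden)
print(toy[mask])  # keeps only the Wooloo row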
# -*- coding: utf-8 -*-
import pandas as pd
# Monthly time-series starting in May, 1874
values = [
365.1,
415.2,
1033.5,
954.1,
335.3,
515.0,
465.4,
192.5,
89.8,
399.2,
423.0,
393.8,
145.4,
454.0,
76.8,
124.2,
34.0,
132.4,
194.0,
91.0,
167.5,
263.6,
231.4,
15.8,
37.3,
15.2,
67.8,
34.7,
104.4,
155.3,
69.1,
149.3,
188.9,
66.2,
65.7,
93.8,
85.5,
69.0,
4.2,
34.9,
73.0,
126.3,
283.5,
23.3,
14.9,
18.1,
47.9,
0.4,
51.5,
41.0,
0.4,
0.0,
57.2,
9.6,
23.6,
2.0,
1.2,
0.0,
0.0,
50.5,
5.8,
19.6,
51.4,
38.5,
41.3,
81.5,
112.4,
33.8,
292.2,
337.6,
142.7,
232.2,
409.0,
488.2,
206.1,
821.3,
926.5,
561.5,
483.1,
460.6,
585.2,
675.9,
640.5,
530.2,
376.9,
612.4,
941.1,
738.0,
867.0,
705.1,
840.4,
641.2,
364.0,
673.1,
928.9,
1799.1,
1519.6,
557.8,
461.5,
607.6,
1026.9,
1393.8,
1931.1,
352.9,
849.8,
734.8,
621.2,
1132.5,
339.4,
1451.4,
2007.7,
595.6,
1230.2,
1793.3,
1725.6,
1305.4,
1669.7,
1536.9,
1207.3,
1581.2,
1210.8,
501.6,
561.3,
727.1,
1115.1,
994.5,
438.0,
866.0,
512.0,
1083.1,
747.0,
855.4,
1059.0,
1702.4,
1155.4,
598.0,
646.0,
718.5,
425.2,
220.5,
544.5,
312.9,
1032.2,
623.6,
688.4,
398.1,
395.4,
115.4,
193.6,
57.5,
1.6,
189.7,
79.7,
75.6,
30.5,
44.8,
252.7,
302.1,
370.5,
162.9,
121.8,
54.0,
52.3,
580.2,
117.4,
91.4,
88.2,
26.0,
185.6,
32.5,
22.2,
65.3,
98.7,
10.5,
240.3,
77.0,
2.4,
27.9,
27.8,
26.9,
14.4,
217.2,
153.5,
282.3,
89.9,
11.6,
0.0,
66.4,
42.3,
1.4,
69.5,
8.9,
21.8,
9.3,
139.8,
157.6,
229.0,
245.6,
181.5,
78.5,
211.7,
266.7,
158.7,
354.4,
553.4,
594.5,
1021.9,
550.7,
1301.4,
605.3,
637.4,
541.2,
1252.7,
1788.1,
757.8,
994.3,
1541.8,
1117.7,
1920.7,
1429.5,
1002.7,
910.9,
953.6,
865.5,
1158.1,
1283.8,
962.9,
1178.4,
1161.8,
1379.0,
1573.8,
2340.8,
1644.5,
1504.0,
1312.7,
2027.7,
1564.7,
1486.2,
623.5,
1603.6,
1779.1,
1898.4,
1740.6,
1164.5,
666.2,
1227.8,
585.4,
1038.5,
942.0,
937.1,
1018.0,
1074.5,
901.8,
1143.5,
465.2,
1376.0,
831.7,
1231.8,
722.5,
1037.1,
285.0,
900.0,
543.6,
423.5,
181.1,
820.6,
513.4,
273.6,
1182.6,
303.6,
608.6,
533.1,
1331.0,
912.6,
523.8,
384.1,
398.3,
217.5,
308.7,
492.7,
703.9,
82.7,
34.2,
750.3,
411.0,
524.0,
624.5,
159.1,
247.5,
64.8,
48.1,
463.8,
858.2,
605.5,
317.4,
172.0,
215.6,
40.1,
280.6,
143.7,
50.7,
203.3,
197.1,
1.4,
25.4,
74.8,
32.9,
54.2,
56.5,
109.9,
151.5,
97.4,
127.6,
113.7,
51.5,
10.7,
23.1,
138.2,
11.6,
0.0,
0.2,
6.4,
13.3,
0.0,
208.5,
41.6,
1.0,
0.0,
0.2,
14.8,
48.3,
0.0,
98.3,
0.4,
179.8,
0.0,
36.1,
5.9,
0.3,
2.4,
76.0,
161.4,
149.6,
3.5,
30.0,
129.9,
189.8,
336.3,
84.1,
105.9,
277.3,
112.8,
84.4,
1204.7,
911.3,
596.7,
411.7,
345.4,
375.8,
901.3,
268.1,
330.4,
511.3,
576.2,
254.6,
634.9,
470.5,
777.9,
1100.6,
1983.4,
1297.7,
408.4,
667.6,
651.4,
1585.3,
1159.5,
696.2,
2060.5,
1946.3,
793.7,
736.5,
395.1,
1057.4,
687.9,
1105.5,
749.3,
1446.9,
738.5,
516.3,
98.7,
560.7,
1207.0,
1196.7,
2453.0,
809.0,
700.9,
608.3,
949.4,
882.5,
824.5,
1410.2,
1193.4,
1074.6,
1002.8,
507.2,
270.4,
217.9,
707.3,
508.7,
630.8,
350.8,
1750.4,
1701.4,
369.3,
717.5,
638.1,
1192.5,
774.4,
1026.5,
470.9,
503.1,
282.7,
563.5,
128.9,
801.6,
866.9,
794.0,
893.3,
381.8,
632.8,
287.4,
70.0,
326.8,
118.3,
186.5,
181.7,
434.4,
511.7,
23.7,
37.0,
39.3,
88.5,
70.5,
260.0,
101.8,
30.0,
11.5,
27.6,
53.1,
17.9,
57.2,
15.5,
0.0,
0.0,
57.4,
88.2,
21.4,
84.4,
9.5,
0.3,
47.3,
55.0,
2.9,
81.6,
14.8,
23.2,
0.8,
3.3,
0.0,
0.0,
6.8,
0.0,
3.6,
14.8,
5.2,
17.8,
13.0,
10.7,
38.7,
286.8,
69.9,
172.5,
46.7,
296.7,
344.0,
33.1,
222.0,
294.5,
302.5,
670.6,
483.1,
964.9,
528.3,
969.0,
1307.9,
946.2,
750.0,
647.3,
444.9,
358.5,
509.6,
712.6,
950.9,
806.4,
1037.8,
1156.9,
572.5,
338.9,
466.2,
722.0,
637.5,
794.2,
1045.8,
1415.4,
1152.9,
919.7,
1445.9,
1717.2,
1554.0,
2977.7,
1875.0,
978.5,
1027.9,
2297.1,
1797.4,
800.4,
854.2,
979.4,
953.3,
567.5,
1281.5,
1805.1,
944.4,
1229.1,
1040.8,
1097.7,
623.5,
1363.4,
1190.2,
707.5,
1651.3,
1876.6,
902.4,
1383.9,
1095.4,
774.4,
761.2,
326.4,
1167.2,
553.0,
1416.0,
327.8,
301.4,
539.2,
407.1,
183.6,
851.6,
617.6,
603.4,
439.2,
490.7,
318.0,
392.6,
448.6,
604.0,
387.4,
577.4,
270.8,
332.9,
310.6,
505.4,
397.0,
90.5,
493.0,
1235.9,
85.4,
123.8,
16.1,
116.8,
86.6,
57.5,
63.5,
176.3,
478.1,
93.5,
1.8,
16.0,
30.4,
19.5,
73.6,
29.3,
1.7,
134.4,
117.2,
112.7,
25.8,
1.7,
68.0,
33.3,
148.5,
246.8,
427.0,
358.5,
360.3,
422.8,
468.5,
608.4,
192.3,
36.2,
313.6,
231.0,
311.8,
703.2,
648.3,
426.1,
473.7,
759.7,
1304.9,
1432.5,
3259.9,
2695.0,
1915.8,
1272.8,
469.0,
1085.9,
1277.2,
842.5,
874.5,
1216.3,
1418.6,
650.3,
1439.9,
1810.0,
1364.8,
902.3,
1379.0,
967.3,
873.6,
811.8,
1030.1,
1173.1,
819.3,
975.2,
623.1,
1509.2,
1146.2,
1694.3,
1368.1,
1243.1,
1358.9,
1936.4,
1331.9,
2213.5,
992.5,
794.4,
1078.5,
980.1,
821.6,
1058.0,
1037.0,
849.4,
1199.0,
1026.9,
744.0,
278.0,
1309.9,
2478.8,
3084.3,
1141.3,
683.7,
656.7,
561.5,
490.3,
278.7,
152.9,
283.1,
368.2,
682.3,
661.3,
239.3,
179.7,
812.1,
552.6,
431.8,
291.6,
178.5,
108.6,
82.9,
149.1,
94.5,
263.2,
204.2,
127.1,
123.5,
150.4,
222.5,
323.0,
344.0,
116.1,
79.9,
8.6,
71.0,
125.2,
266.5,
179.8,
608.5,
162.3,
15.4,
15.7,
39.0,
12.2,
0.3,
19.5,
37.3,
4.3,
0.7,
42.8,
58.9,
11.4,
355.0,
373.4,
99.7,
105.9,
96.5,
21.1,
21.1,
79.9,
153.0,
312.2,
256.1,
196.9,
126.2,
361.8,
690.2,
571.8,
444.5,
544.7,
741.4,
1557.1,
1662.0,
1647.4,
1226.7,
1128.0,
1044.6,
566.0,
804.2,
628.8,
952.0,
924.1,
1084.8,
1938.1,
1745.4,
2252.9,
2347.5,
1142.9,
2256.6,
2337.3,
2712.5,
3363.4,
2504.1,
1504.5,
2358.4,
852.8,
1240.4,
2264.8,
1744.0,
1473.5,
2365.6,
2319.7,
1322.8,
3163.3,
1678.7,
1603.4,
1917.3,
2412.9,
1916.8,
1159.1,
1031.2,
763.2,
1999.7,
1751.0,
1130.6,
1962.4,
2326.9,
3165.1,
1920.3,
1170.6,
541.9,
1158.5,
963.7,
1467.5,
642.1,
668.3,
1297.0,
893.1,
1899.8,
945.8,
1057.6,
594.3,
858.3,
483.9,
478.9,
519.5,
347.3,
206.5,
775.8,
997.1,
907.8,
1347.8,
828.7,
641.7,
362.5,
361.8,
991.3,
923.6,
977.6,
275.5,
89.5,
188.0,
242.4,
108.7,
230.9,
485.4,
253.3,
72.3,
551.6,
628.0,
753.2,
248.6,
36.7,
240.0,
142.7,
242.0,
234.7,
126.8,
284.6,
25.6,
1.6,
145.3,
0.7,
8.8,
52.5,
24.2,
78.4,
250.1,
192.8,
177.5,
538.3,
182.3,
92.2,
514.6,
584.6,
283.0,
399.2,
505.6,
270.7,
450.7,
1111.2,
398.6,
325.3,
1237.5,
3091.0,
1550.9,
1433.5,
1084.9,
715.7,
2912.4,
2290.7,
1859.8,
1344.5,
1852.4,
2513.5,
2307.6,
2721.0,
3595.8,
3950.3,
3741.3,
1933.8,
2451.1,
3207.3,
1991.6,
2379.8,
1686.4,
1642.6,
1113.1,
1089.6,
1261.3,
3432.1,
3305.2,
2559.4,
1766.0,
2075.3,
1864.9,
1745.3,
1006.1,
2476.9,
2240.0,
3640.3,
3276.7,
1882.3,
1217.1,
1606.4,
1752.0,
2265.1,
2185.5,
1815.0,
2313.0,
1486.5,
1416.2,
1927.3,
1660.3,
2359.1,
1578.1,
1048.3,
1255.7,
1068.5,
488.4,
571.1,
744.9,
610.2,
746.4,
626.7,
845.2,
1599.0,
2899.0,
2477.1,
779.9,
664.0,
959.9,
757.4,
666.0,
602.5,
464.0,
305.4,
141.9,
351.0,
256.6,
393.1,
654.3,
688.7,
307.0,
339.5,
455.4,
477.3,
288.7,
10.5,
70.3,
405.5,
195.6,
217.8,
80.7,
268.9,
141.1,
47.0,
9.1,
6.0,
0.7,
1.4,
157.4,
3.2,
0.8,
1.8,
10.5,
58.0,
2.2,
15.7,
57.8,
106.2,
540.9,
287.4,
37.2,
80.1,
302.1,
490.5,
216.4,
574.0,
436.8,
921.3,
1664.7,
1076.8,
1584.0,
2878.2,
1855.9,
1418.5,
2220.6,
1673.4,
1872.8,
3016.2,
2598.4,
2385.4,
3823.3,
3409.6,
2328.1,
1454.6,
1814.2,
2064.0,
2568.7,
4535.8,
3150.3,
2356.6,
4554.0,
4473.7,
3011.6,
4270.1,
2994.3,
2546.6,
4437.6,
3603.3,
2707.8,
2600.3,
3298.8,
2899.1,
3340.6,
2597.1,
1877.9,
3231.8,
4902.0,
2410.2,
3740.2,
2701.0,
2807.4,
3080.9,
2684.8,
3535.9,
2415.1,
1465.1,
2386.1,
2346.6,
2536.1,
1805.2,
1191.7,
1786.5,
2078.9,
1414.4,
1857.3,
2034.0,
1439.2,
1168.6,
1476.1,
906.6,
772.7,
357.6,
512.6,
638.1,
459.9,
961.9,
1155.5,
498.4,
963.1,
329.0,
314.8,
398.4,
593.8,
826.9,
693.0,
554.3,
634.0,
362.8,
262.9,
140.3,
657.5,
371.6,
291.1,
174.0,
174.5,
194.1,
144.5,
303.0,
507.2,
349.4,
147.9,
322.5,
622.7,
433.9,
185.3,
69.0,
105.1,
84.0,
149.8,
17.8,
30.9,
25.2,
7.6,
51.5,
16.6,
24.6,
27.1,
106.7,
145.9,
112.5,
52.9,
23.6,
224.6,
94.1,
73.4,
22.0,
156.1,
175.3,
139.6,
139.7,
283.2,
240.3,
563.8,
786.9,
411.9,
318.8,
762.1,
521.5,
939.3,
637.7,
710.4,
934.8,
1840.4,
1493.2,
2081.5,
892.2,
1730.6,
1025.7,
1476.4,
2128.5,
886.1,
1117.5,
1553.4,
2004.2,
2482.8,
1879.3,
1117.5,
1002.0,
1904.1,
1838.5,
1481.9,
1640.6,
1264.4,
1406.7,
1134.3,
1685.0,
1571.0,
1272.4,
2187.3,
1508.7,
1335.5,
1793.9,
1167.6,
1200.1,
845.8,
1574.1,
1698.7,
1246.5,
1593.4,
2255.9,
1382.8,
1920.1,
2126.9,
1529.6,
1646.2,
1310.9,
1256.4,
1271.2,
1566.3,
1355.4,
1942.5,
1433.8,
688.1,
884.0,
701.8,
487.3,
1122.0,
1156.2,
599.0,
954.6,
913.9,
999.2,
522.4,
1338.2,
1207.6,
572.4,
1318.6,
1321.9,
929.2,
1051.7,
604.7,
1017.1,
554.9,
561.7,
399.1,
423.7,
722.0,
765.1,
411.5,
441.0,
281.6,
262.2,
883.4,
407.0,
242.6,
251.4,
204.6,
252.8,
128.2,
513.5,
382.9,
292.6,
831.1,
432.2,
584.3,
603.0,
309.5,
251.6,
155.4,
80.2,
64.1,
17.3,
59.9,
106.9,
353.6,
702.0,
74.0,
114.2,
235.1,
34.2,
115.5,
11.3,
336.3,
376.3,
104.1,
76.3,
8.7,
290.7,
169.3,
181.9,
92.0,
274.9,
148.4,
216.0,
36.6,
123.9,
131.9,
617.4,
205.2,
273.2,
885.7,
611.0,
270.7,
644.5,
558.6,
1901.5,
1294.5,
1520.7,
1346.8,
1175.9,
878.4,
530.5,
1996.2,
1455.6,
1436.0,
2327.5,
2688.0,
2217.3,
1581.6,
1823.8,
1676.7,
2614.9,
1795.6,
2003.7,
2258.3,
2703.9,
3132.3,
1838.2,
1935.2,
2113.0,
1134.0,
2827.5,
2884.0,
1960.7,
2003.4,
1456.5,
1994.8,
2469.2,
3083.3,
2067.0,
1248.5,
2216.6,
2348.6,
2216.7,
2082.4,
1451.1,
3295.0,
1840.5,
2572.0,
3239.7,
2207.1,
2524.7,
1607.7,
3857.0,
3569.5,
2053.7,
1390.1,
3070.7,
1909.0,
2065.2,
1778.5,
1539.5,
1851.3,
2707.4,
1214.4,
957.5,
905.9,
976.7,
1736.5,
1577.8,
1105.1,
845.9,
607.1,
904.1,
203.5,
293.5,
1582.0,
2174.8,
1033.3,
1723.4,
1619.0,
588.9,
289.0,
191.5,
130.7,
69.1,
234.3,
99.8,
168.5,
102.0,
132.3,
368.2,
342.5,
246.9,
392.5,
45.2,
19.1,
193.3,
100.3,
51.0,
32.5,
417.5,
145.9,
174.5,
45.2,
1.9,
105.2,
81.7,
23.8,
359.9,
94.3,
14.0,
25.3,
13.5,
108.8,
502.6,
457.3,
104.1,
256.5,
565.9,
249.7,
493.6,
490.0,
294.5,
489.5,
507.9,
870.7,
1032.7,
492.3,
1755.6,
2044.0,
1714.3,
1220.8,
2081.5,
1593.2,
2423.8,
2952.2,
2592.5,
3054.7,
1954.4,
1824.1,
3713.7,
1503.4,
3150.0,
3308.7,
2007.9,
2489.2,
2399.9,
2080.1,
1727.0,
1826.8,
1565.2,
1872.4,
1333.7,
2074.3,
3414.2,
1662.3,
1957.7,
2793.5,
2277.0,
3211.4,
3168.0,
2794.1,
1486.8,
2275.7,
2728.6,
2525.4,
2646.0,
1635.2,
2753.9,
1370.1,
3047.0,
2413.4,
2837.2,
1271.3,
1336.5,
761.9,
721.5,
1201.3,
1135.8,
733.8,
1411.3,
1399.1,
967.4,
613.7,
1550.5,
1001.2,
687.9,
568.6,
595.5,
481.9,
409.2,
311.7,
798.0,
655.2,
681.0,
1030.1,
438.0,
171.2,
120.9,
251.5,
196.0,
295.4,
216.8,
390.1,
523.9,
113.4,
337.4,
185.6,
235.5,
315.2,
142.8,
189.7,
136.7,
149.0,
82.2,
54.6,
269.6,
90.1,
64.6,
74.1,
16.9,
49.2,
22.4,
47.9,
83.5,
163.5,
130.1,
5.1,
1.8,
277.7,
110.6,
10.8,
38.0,
51.0,
86.8,
116.5,
60.7,
38.8,
149.5,
726.1,
131.4,
632.3,
481.0,
257.9,
308.0,
810.2,
520.8,
673.8,
679.0,
690.1,
1358.9,
1331.9,
469.2,
1006.6,
1050.5,
634.5,
937.0,
655.7,
460.1,
1146.6,
1690.3,
1533.7,
1397.3,
609.9,
1509.7,
1871.8,
1497.5,
927.2,
1460.5,
2221.5,
1862.9,
2182.2,
2093.5,
2343.9,
1082.5,
1872.3,
948.8,
1351.0,
1024.3,
934.8,
669.5,
1671.0,
1892.8,
1109.2,
1785.9,
819.2,
1619.0,
3040.4,
2128.9,
2424.1,
2354.3,
1781.4,
1637.0,
1367.6,
2053.3,
2182.2,
1346.8,
2196.6,
2527.2,
2093.5,
1799.5,
1843.8,
1115.9,
987.7,
474.0,
1183.2,
1107.9,
950.6,
1280.1,
1177.4,
948.8,
593.6,
2261.2,
1594.1,
632.3,
762.8,
619.9,
700.0,
439.6,
627.3,
593.6,
1239.5,
985.4,
566.1,
600.4,
796.1,
274.6,
900.5,
534.5,
419.5,
395.7,
795.3,
701.9,
700.0,
539.2,
598.3,
52.4,
398.1,
475.5,
150.8,
10.5,
78.6,
490.9,
188.8,
196.9,
214.6,
438.5,
193.2,
79.5,
528.7,
370.3,
377.5,
200.5,
41.5,
132.5,
209.5,
220.3,
193.3,
64.1,
15.4,
4.5,
5.1,
135.9,
14.5,
8.7,
144.5,
28.9,
11.1,
9.3,
1.8,
0.0,
3.3,
15.8,
31.7,
4.1,
5.9,
3.5,
1.8,
1.9,
7.7,
17.7,
51.9,
0.5,
31.3,
82.2,
14.5,
99.8,
223.5,
182.0,
222.0,
71.4,
55.3,
89.6,
187.9,
233.9,
399.1,
405.1,
193.7,
308.0,
237.8,
404.5,
981.4,
662.2,
380.9,
314.5,
452.3,
613.1,
1205.4,
1256.6,
1536.6,
949.5,
1000.8,
380.4,
802.1,
739.9,
1047.1,
768.0,
1282.1,
709.5,
706.5,
840.5,
825.5,
460.2,
996.7,
442.5,
635.4,
1116.3,
1117.1,
635.6,
668.8,
605.2,
]
dr = pd.date_range("1874-05-31", periods=len(values), freq="M")
tsd =
|
pd.Series(values, index=dr)
|
pandas.Series
|
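# A short sketch of typical follow-ups once the monthly Series above exists: yearly totals and
# a rolling mean. Only the first twelve values are reused here, and the "M"/"A" frequency
# aliases assume an older pandas release where they are still accepted.
import pandas as pd

sample = [365.1, 415.2, 1033.5, 954.1, 335.3, 515.0, 465.4, 192.5, 89.8, 399.2, 423.0, 393.8]
tsd_head = pd.Series(sample, index=pd.date_range("1874-05-31", periods=len(sample), freq="M"))
print(tsd_head.resample("A").sum())       # calendar-year totals (1874 is a partial year)
print(tsd_head.rolling(3).mean().tail())  # 3-month moving average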
import emoji
import collections as c
import pandas as pd
# for visualization
import plotly.express as px
import matplotlib.pyplot as plt
# word cloud
from wordcloud import WordCloud, STOPWORDS
def authors_name(data):
"""
    It returns the names of the participants in the chat.
"""
authors = data.Author.unique().tolist()
    return [name for name in authors if name is not None]
def extract_emojis(s):
"""
    This function extracts the emojis from a text string and returns them as a list.
"""
    # note: emoji.UNICODE_EMOJI changed layout in emoji 1.x and was removed in 2.x; this assumes an older emoji release
    return [ch for ch in s if ch in emoji.UNICODE_EMOJI]
def stats(data):
"""
    This function takes the chat data and returns the counts of messages, media messages, and emojis used in the chat.
"""
total_messages = data.shape[0]
media_messages = data[data['Message'] == '<Media omitted>'].shape[0]
emojis = sum(data['emoji'].str.len())
return "Total Messages 💬: {} \n Total Media 🎬: {} \n Total Emoji's 😂: {}".format(total_messages, media_messages, emojis)
def popular_emoji(data):
"""
    This function returns the list of emojis with their frequencies.
"""
total_emojis_list = list([a for b in data.emoji for a in b])
emoji_dict = dict(c.Counter(total_emojis_list))
emoji_list = sorted(emoji_dict.items(), key=lambda x: x[1], reverse=True)
return emoji_list
def visualize_emoji(data):
"""
    This function makes a pie chart of the most used emojis.
"""
emoji_df = pd.DataFrame(popular_emoji(data), columns=['emoji', 'count'])
fig = px.pie(emoji_df, values='count', names='emoji')
fig.update_traces(textposition='inside', textinfo='percent+label')
# fig.show()
return fig
def word_cloud(df):
"""
    This function generates a word cloud from the chat dataframe.
"""
df = df[df['Message'] != '<Media omitted>']
df = df[df['Message'] != 'This message was deleted']
words = ' '.join(df['Message'])
processed_words = ' '.join([word for word in words.split() if 'http' not in word and not word.startswith('@') and word != 'RT'])
    # STOPWORDS drops articles and other common filler words
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', height=640, width=800).generate(processed_words)
# plt.figure(figsize=(45,8))
plt.imshow(wordcloud, interpolation='bilinear')
plt.xticks([])
plt.yticks([])
def active_date(data):
"""
    This function generates a horizontal bar graph of the number of messages
    for the ten most active dates.
"""
ax = data['Date'].value_counts().head(10).plot.barh()
ax.set_title('Top 10 active date')
ax.set_xlabel('Number of Messages')
ax.set_ylabel('Date')
plt.tight_layout()
def active_time(data):
"""
    This function generates a horizontal bar graph of the number of messages by time of day.
    Parameters
    ----------
    data : DataFrame
        Parsed chat data used to build the graph.
Returns
-------
None.
"""
ax = data['Time'].value_counts().head(10).plot.barh()
ax.set_title('Top 10 active time')
ax.set_xlabel('Number of messages')
ax.set_ylabel('Time')
plt.tight_layout()
def day_wise_count(data):
"""
    This function generates a line polar plot of day-wise message counts.
    Parameters
    ----------
    data : DataFrame
        Parsed chat data with a 'Message' column and day-of-week information.
    Returns
    -------
    fig : plotly figure
        Line polar chart of message counts per day of the week.
"""
days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
day_df =
|
pd.DataFrame(data["Message"])
|
pandas.DataFrame
|
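# A usage sketch for the helpers above, run against a tiny hand-made chat frame with the
# columns they expect (Author, Message, Date, Time plus a derived 'emoji' column). A real run
# would parse an exported chat instead, and this assumes an emoji release where extract_emojis
# works as written.
import pandas as pd

chat = pd.DataFrame({
    "Author": ["Alice", "Bob", "Alice"],
    "Message": ["hi 😀", "<Media omitted>", "see you 😀👍"],
    "Date": ["2021-01-01", "2021-01-01", "2021-01-02"],
    "Time": ["10:00", "10:05", "21:30"],
})
chat["emoji"] = chat["Message"].apply(extract_emojis)
print(authors_name(chat))
print(stats(chat))
print(popular_emoji(chat))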
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - Economic Indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - US - Pending Home Sales MoM
def macro_usa_phs():
"""
    US pending home sales MoM
http://data.eastmoney.com/cjsj/foreign_0_5.html
    :return: pending home sales MoM
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
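# For reference, a toy illustration of the JSONP-style payload handled above: the endpoint wraps
# relaxed JSON in parentheses, so the code strips the outer "(" and ")" and lets demjson decode
# the unquoted-key object. The payload string below is made up.
import demjson

data_text = "({data:['2021-05,1.1,0.8,2021-06-30'],pages:1})"
data_json = demjson.decode(data_text[1:-1])
rows = [item.split(',') for item in data_json['data']]
print(rows)  # [['2021-05', '1.1', '0.8', '2021-06-30']]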
# Jin10 Data Center - Economic Indicators - US - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
"""
    US Gross Domestic Product (GDP) report, data from 2008-02-28 onward
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
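# A hedged sketch (toy values, pd.concat instead of the deprecated Series.append) of the merge
# step every report function in this module repeats: an archived history series is combined with
# the latest values from the list_v2 API, de-duplicated by date and sorted.
import pandas as pd

history = pd.Series([0.6, 0.9], index=pd.to_datetime(["2008-02-28", "2008-04-30"]))
recent = pd.Series(["0.9", "2.1"], index=pd.to_datetime(["2008-04-30", "2019-07-26"]))
merged = pd.concat([history, recent])
merged = merged[~merged.index.duplicated(keep="first")].sort_index().astype("float")
print(merged)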
# Jin10 Data Center - Economic Indicators - US - Price Levels - US CPI MoM Report
def macro_usa_cpi_monthly():
"""
    US CPI MoM report, data from 1970-01-01 onward
https://datacenter.jin10.com/reportType/dc_usa_cpi
https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
    :return: US CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Core CPI MoM Report
def macro_usa_core_cpi_monthly():
"""
    US core CPI MoM report, data from 1970-01-01 onward
https://datacenter.jin10.com/reportType/dc_usa_core_cpi
https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
    :return: US core CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Personal Spending MoM Report
def macro_usa_personal_spending():
"""
    US personal spending MoM report, data from 1970-01-01 onward
https://datacenter.jin10.com/reportType/dc_usa_personal_spending
https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
    :return: US personal spending MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Retail Sales MoM Report
def macro_usa_retail_sales():
"""
    US retail sales MoM report, data from 1992-03-01 onward
https://datacenter.jin10.com/reportType/dc_usa_retail_sales
https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
    :return: US retail sales MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Import Price Index Report
def macro_usa_import_price():
"""
    US import price index report, data from 1989-02-01 onward
https://datacenter.jin10.com/reportType/dc_usa_import_price
https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
    :return: US import price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Export Price Index Report
def macro_usa_export_price():
"""
    US export price index report, data from 1989-02-01 onward
https://datacenter.jin10.com/reportType/dc_usa_export_price
https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
    :return: US export price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - LMCI
def macro_usa_lmci():
"""
    Fed Labor Market Conditions Index (LMCI) report, data from 2014-10-06 onward
https://datacenter.jin10.com/reportType/dc_usa_lmci
https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
    :return: Fed Labor Market Conditions Index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Unemployment - US Unemployment Rate Report
def macro_usa_unemployment_rate():
"""
    US unemployment rate report, data from 1970-01-01 onward
https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
    :return: US unemployment rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Unemployment - US Challenger Job Cuts Report
def macro_usa_job_cuts():
"""
    US Challenger job cuts report, data from 1994-02-01 onward
https://datacenter.jin10.com/reportType/dc_usa_job_cuts
https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
    :return: US Challenger job cuts report - current value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Employment - US Nonfarm Payrolls Report
def macro_usa_non_farm():
"""
    US nonfarm payrolls report, data from 1970-01-02 onward
https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
    :return: US nonfarm payrolls report - current value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Employment - US ADP Employment Report
def macro_usa_adp_employment():
"""
    US ADP employment report, data from 2001-06-01 onward
https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
    :return: US ADP employment report - current value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Consumer Income & Spending - US Core PCE Price Index YoY Report
def macro_usa_core_pce_price():
"""
    US core PCE price index YoY report, data from 1970-01-01 onward
https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
    :return: US core PCE price index YoY report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Consumer Income & Spending - US Real Personal Consumption QoQ (Advance) Report
def macro_usa_real_consumer_spending():
"""
    US real personal consumption expenditure QoQ advance report, data from 2013-11-07 onward
https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
    :return: US real personal consumption expenditure QoQ advance report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Trade - US Trade Balance Report
def macro_usa_trade_balance():
"""
    US trade balance report, data from 1970-01-01 onward
https://datacenter.jin10.com/reportType/dc_usa_trade_balance
https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
    :return: US trade balance report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_trade_balance"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Trade - US Current Account Report
def macro_usa_current_account():
"""
    US current account report, data from 2008-03-17 onward
https://datacenter.jin10.com/reportType/dc_usa_current_account
https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012
    :return: US current account report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国经常账报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "12",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_current_account"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Industry - Manufacturing - Baker Hughes Rig Count Report
def macro_usa_rig_count():
"""
    Baker Hughes rig count report, data from 2008-03-17 onward
https://datacenter.jin10.com/reportType/dc_rig_count_summary
https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203
    :return: Baker Hughes rig count report - current week
:rtype: pandas.Series
"""
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/baker.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
big_df = pd.DataFrame()
big_df["钻井总数_钻井数"] = temp_df["钻井总数"].apply(lambda x: x[0])
big_df["钻井总数_变化"] = temp_df["钻井总数"].apply(lambda x: x[1])
big_df["美国石油钻井_钻井数"] = temp_df["美国石油钻井"].apply(lambda x: x[0])
big_df["美国石油钻井_变化"] = temp_df["美国石油钻井"].apply(lambda x: x[1])
big_df["混合钻井_钻井数"] = temp_df["混合钻井"].apply(lambda x: x[0])
big_df["混合钻井_变化"] = temp_df["混合钻井"].apply(lambda x: x[1])
big_df["美国天然气钻井_钻井数"] = temp_df["美国天然气钻井"].apply(lambda x: x[0])
big_df["美国天然气钻井_变化"] = temp_df["美国天然气钻井"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
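# A toy illustration of the unpacking done in macro_usa_rig_count above: each cell of the raw
# payload holds a [count, weekly change] pair, and .apply(lambda x: x[...]) splits the pair into
# two numeric columns. The dates and counts below are invented.
import pandas as pd

raw = pd.DataFrame({"钻井总数": [[757, -3], [760, 5]]}, index=["2023-01-06", "2023-01-13"])
out = pd.DataFrame(index=raw.index)
out["钻井总数_钻井数"] = raw["钻井总数"].apply(lambda x: x[0])  # rig count
out["钻井总数_变化"] = raw["钻井总数"].apply(lambda x: x[1])    # change vs previous week
print(out.astype("float"))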
# Jin10 Data Center - Economic Indicators - US - Industry - Manufacturing - US Personal Spending MoM Report
# Jin10 Data Center - Economic Indicators - US - Industry - Manufacturing - US Producer Price Index (PPI) Report
def macro_usa_ppi():
"""
    US Producer Price Index (PPI) report, data from 2008-02-26 onward
https://datacenter.jin10.com/reportType/dc_usa_ppi
https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628
    :return: US Producer Price Index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "37",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Industry - Manufacturing - US Core Producer Price Index (PPI) Report
def macro_usa_core_ppi():
"""
    US core Producer Price Index (PPI) report, data from 2008-03-18 onward
https://datacenter.jin10.com/reportType/dc_usa_core_ppi
https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709
    :return: US core Producer Price Index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "7",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US API Crude Oil Inventory Report
def macro_usa_api_crude_stock():
    """
    US API crude oil inventory report, data available from 20120328 to the present
    https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock
    https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859
    :return: US API crude oil inventory report - current value (10k barrels)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国API原油库存报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "69",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_api_crude_stock"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US Markit Manufacturing PMI Flash Report
def macro_usa_pmi():
    """
    US Markit manufacturing PMI flash report, data available from 20120601 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969
    :return: US Markit manufacturing PMI flash report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "74",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US ISM Manufacturing PMI Report
def macro_usa_ism_pmi():
    """
    US ISM manufacturing PMI report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071
    :return: US ISM manufacturing PMI report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industrial - US Industrial Production MoM Report
def macro_usa_industrial_production():
    """
    US industrial production month-over-month report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_industrial_production
    https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188
    :return: US industrial production MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工业产出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "20",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_industrial_production"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industrial - US Durable Goods Orders MoM Report
def macro_usa_durable_goods_orders():
    """
    US durable goods orders month-over-month report, data available from 20080227 to the present
    https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders
    https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295
    :return: US durable goods orders MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国耐用品订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "13",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_durable_goods_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industrial - US Factory Orders MoM Report
def macro_usa_factory_orders():
    """
    US factory orders month-over-month report, data available from 19920401 to the present
    https://datacenter.jin10.com/reportType/dc_usa_factory_orders
    https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385
    :return: US factory orders MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工厂订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "16",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_factory_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US Markit Services PMI Flash Report
def macro_usa_services_pmi():
    """
    US Markit services PMI flash report, data available from 20120701 to the present
    https://datacenter.jin10.com/reportType/dc_usa_services_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503
    :return: US Markit services PMI flash report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit服务业PMI初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "89",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_services_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US Business Inventories MoM Report
def macro_usa_business_inventories():
    """
    US business inventories month-over-month report, data available from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_business_inventories
    https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618
    :return: US business inventories MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国商业库存月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "4",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_business_inventories"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US ISM Non-Manufacturing PMI Report
def macro_usa_ism_non_pmi():
    """
    US ISM non-manufacturing PMI report, data available from 19970801 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693
    :return: US ISM non-manufacturing PMI report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US NAHB Housing Market Index Report
def macro_usa_nahb_house_market_index():
    """
    US NAHB housing market index report, data available from 19850201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index
    https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817
    :return: US NAHB housing market index report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NAHB房产市场指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "31",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_nahb_house_market_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Annualized Housing Starts Report
def macro_usa_house_starts():
    """
    US annualized housing starts report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_starts
    https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v=1578747388
    :return: US annualized housing starts report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋开工总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "17",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_starts"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Annualized New Home Sales Report
def macro_usa_new_home_sales():
    """
    US annualized new home sales report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_new_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v=1578747501
    :return: US annualized new home sales report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "32",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_new_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Building Permits Report
def macro_usa_building_permits():
    """
    US building permits report, data available from 20080220 to the present
    https://datacenter.jin10.com/reportType/dc_usa_building_permits
    https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v=1578747599
    :return: US building permits report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国营建许可总数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "3",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_building_permits"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Annualized Existing Home Sales Report
def macro_usa_exist_home_sales():
    """
    US annualized existing home sales report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_exist_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v=1578747703
    :return: US annualized existing home sales report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "15",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_exist_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US FHFA House Price Index MoM Report
def macro_usa_house_price_index():
    """
    US FHFA house price index month-over-month report, data available from 19910301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_price_index
    https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v=1578747781
    :return: US FHFA house price index MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国FHFA房价指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "51",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_price_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US S&P/Case-Shiller 20-City House Price Index YoY Report
def macro_usa_spcs20():
    """
    US S&P/Case-Shiller 20-city house price index year-over-year report, data available from 20010201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_spcs20
    https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v=1578747873
    :return: US S&P/Case-Shiller 20-city house price index YoY report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国S&P/CS20座大城市房价指数年率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "52",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_spcs20"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Pending Home Sales Index MoM Report
def macro_usa_pending_home_sales():
    """
    US pending home sales index month-over-month report, data available from 20010301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pending_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v=1578747959
    :return: US pending home sales index MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋签约销售指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "34",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pending_home_sales"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Leading Indicators - US Conference Board Consumer Confidence Index Report
def macro_usa_cb_consumer_confidence():
    """
    US Conference Board consumer confidence index report, data available from 19700101 to the present
    https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v=1578576859
    :return: US Conference Board consumer confidence index report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}")
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国谘商会消费者信心指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "5",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index =
|
pd.to_datetime(temp_se.iloc[:, 0])
|
pandas.to_datetime
|
import numpy as np
import data_generator_lib
import pandas as pd
import librosa
import data_cnn_format as cnn
import gccphat
import constants
import rotate
import final_models
import utility_methods
from plotting import plotting
class Simulation:
"""
    Class for running a single simulation of DOA (direction-of-arrival) estimation on synthetic data
"""
def __init__(self, directory, source_azimuth, source_distance, model):
self.current_position = 0
self.iteration = 0
self.directory = directory
self.predictions = []
self.source_azimuth = source_azimuth
self.source_distance = source_distance
self.model = model
self.rotation = 0
self.mic_centre = np.array([1.5, 1.5])
self.rotation_list = [0]
self.prediction = 0
self.initial_adjust = False
self.current_model = self.get_model() # init
self.results = []
self.audio_number = 0
# TODO: move doa to here
def store_prediction(self, doa_list):
"""
        Convert the relative DOA predictions into home (room) coordinates and store them.
"""
true_doas = [utility_methods.cylindrical(self.current_position + doa_list[0]),
utility_methods.cylindrical(self.current_position + doa_list[1])]
self.predictions.append(true_doas)
def get_model(self):
model = None
if self.model == "gcc_cnn":
model = final_models.gcc_cnn()
elif self.model == "raw_cnn":
model = final_models.raw_cnn()
elif self.model == "raw_resnet":
model = final_models.raw_resnet()
elif self.model == "gcc_dsp":
model = final_models.gcc_dsp()
else:
print("Error -> No file found")
return model
def simulated_record(self, mic_rotation):
"""
Simulation of recording audio. Takes source position and mic rotation,
simulates acoustics, and records to wav
:param mic_rotation: angle of rotation of microphone to simulate the head movements
"""
data_point = data_generator_lib.get_data(subset_size=1)[self.audio_number]
wav_id, true_azimuth = data_generator_lib.generate_training_data(source_azimuth_degrees=self.source_azimuth,
source_distance_from_room_centre=self.source_distance,
SNR=-20,
RT60=0.3,
source=data_point,
mic_centre=self.mic_centre,
mic_rotation_degrees=mic_rotation,
binaural_object=constants.room,
dir=self.directory)
output = [wav_id, true_azimuth, self.iteration]
df =
|
pd.DataFrame([output], columns=['stereo wav', 'True Azimuth', 'Rotation Iteration'])
|
pandas.DataFrame
|
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import pandas as pd
import numpy as np
import sys, os, site, zipfile, math, time, json, io
import googlemaps, urllib, shapely, shutil, requests
import xml.etree.ElementTree as ET
from glob import glob
from urllib.error import HTTPError
from urllib.request import URLError
from http.client import IncompleteRead
from zipfile import BadZipFile
from tqdm import tqdm, trange
from warnings import warn
###########################
### IMPORT PROJECT PATH ###
import pvvm.settings
revmpath = pvvm.settings.revmpath
datapath = pvvm.settings.datapath
apikeys = pvvm.settings.apikeys
nsrdbparams = pvvm.settings.nsrdbparams
#####################
### Imports from pvvm
import pvvm.toolbox
import pvvm.io
#######################
### DICTS AND LISTS ###
#######################
isos = ['CAISO', 'ERCOT', 'MISO', 'PJM', 'NYISO', 'ISONE']
resolutionlmps = {
('CAISO', 'da'): 60, ('CAISO', 'rt'): 5,
('ERCOT', 'da'): 60, ('ERCOT', 'rt'): 5,
('MISO', 'da'): 60, ('MISO', 'rt'): 60,
('PJM', 'da'): 60, ('PJM', 'rt'): 60,
('NYISO', 'da'): 60, ('NYISO', 'rt'): 5,
('ISONE', 'da'): 60, ('ISONE', 'rt'): 60,
}
################
### DOWNLOAD ###
################
###############
### General use
def constructpayload(**kwargs):
out = []
for kwarg in kwargs:
out.append('{}={}'.format(kwarg, kwargs[kwarg]))
stringout = '&'.join(out)
return stringout
def constructquery(urlstart, **kwargs):
out = '{}{}'.format(urlstart, constructpayload(**kwargs))
return out
def stampify(date, interval=pd.Timedelta('1H')):
datetime = pd.Timestamp(date)
if interval == pd.Timedelta('1H'):
dateout = '{}{:02}{:02}T{:02}'.format(
datetime.year, datetime.month,
datetime.day, datetime.hour)
elif interval == pd.Timedelta('1D'):
dateout = '{}{:02}{:02}'.format(
datetime.year, datetime.month,
datetime.day)
return dateout
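# Hedged usage sketch for the helpers above (the URL and date are illustrative values only):
# constructquery joins keyword arguments onto a query-string prefix, and stampify collapses
# a timestamp into the compact form used in downloaded filenames.
def _query_helpers_sketch():
    example_url = constructquery(
        'http://oasis.caiso.com/oasisapi/GroupZip?',
        groupid='DAM_LMP_GRP', version=1, resultformat=6)
    example_stamp = stampify('2017-06-01 13:00', interval=pd.Timedelta('1H'))  # '20170601T13'
    return example_url, example_stamp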
def download_file_series(urlstart, urlend, fileseries, filepath,
overwrite=False, sleeptime=60, numattempts=200, seriesname=True):
"""
Example
-------
You want to download a list of files at urls = [
'http://www.test.com/foo001.csv', 'http://www.test.com/foo002.csv'].
Then:
urlstart = 'http://www.test.com/foo'
urlend = '.csv'
fileseries = ['001', '002']
If you want the files to be named 'foo001.csv', use seriesname=False
If you want the files to be named '001.csv', use seriesname=True
"""
filepath = pvvm.toolbox.pathify(filepath, make=True)
### Make lists of urls, files to download, and filenames
urls = [(urlstart + file + urlend) for file in fileseries]
todownload = [os.path.basename(url) for url in urls]
if seriesname == True:
filenames = [os.path.basename(file) + urlend for file in fileseries]
else:
filenames = todownload
### Get the list of downloaded files
downloaded = [os.path.basename(file) for file in glob(filepath + '*')]
### Remake the list if overwrite == False
if overwrite == False:
filestodownload = []
urlstodownload = []
fileseriesnames = []
for i in range(len(filenames)):
if filenames[i] not in downloaded:
filestodownload.append(todownload[i])
urlstodownload.append(urls[i])
fileseriesnames.append(filenames[i])
elif overwrite == True:
filestodownload = todownload
urlstodownload = urls
fileseriesnames = filenames
### Download the files
for i in trange(len(urlstodownload)):
### Attempt the download
attempts = 0
while attempts < numattempts:
try:
urllib.request.urlretrieve(
urlstodownload[i], filepath + fileseriesnames[i])
break
except (HTTPError, IncompleteRead, EOFError) as err:
print(urlstodownload[i])
print(filestodownload[i])
                print('Rebuffed on attempt # {} at {} by "{}". '
                      'Will retry in {} seconds.'.format(
                          attempts, pvvm.toolbox.nowtime(), err, sleeptime))
attempts += 1
time.sleep(sleeptime)
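# Hedged example mirroring the docstring above; the URL, series, and path are the
# docstring's own illustrative values rather than real endpoints.
def _download_file_series_sketch():
    download_file_series(
        urlstart='http://www.test.com/foo', urlend='.csv',
        fileseries=['001', '002'], filepath='/tmp/foo/',
        overwrite=False, seriesname=True)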
###########################
### Geographic manipulation
def rowlatlon2x(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
x = math.cos(latrad) * math.cos(lonrad)
return x
def rowlatlon2y(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
y = math.cos(latrad) * math.sin(lonrad)
return y
def rowlatlon2z(row):
latrad = row['latitude'] * math.pi / 180
z = math.sin(latrad)
return z
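# Hedged usage sketch: these row helpers are presumably applied across a node table with
# DataFrame.apply, mapping each (latitude, longitude) pair onto the unit sphere before
# further aggregation. The coordinates below are illustrative only.
def _latlon_to_xyz_sketch():
    nodes = pd.DataFrame({'latitude': [37.77, 34.05], 'longitude': [-122.42, -118.24]})
    nodes['x'] = nodes.apply(rowlatlon2x, axis=1)
    nodes['y'] = nodes.apply(rowlatlon2y, axis=1)
    nodes['z'] = nodes.apply(rowlatlon2z, axis=1)
    return nodes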
############
### ISO LMPs
"""
Note: These scripts worked as of early 2018, but MISO, PJM, and NYISO have since
changed their websites, and CAISO has removed data prior to 20150303. Scripts
are included here for documentary purposes and as a resource for future
data collection, but are unlikely to work given ISO website changes.
"""
def download_caiso_lmp_allnodes(market, start, filepathout,
product='LMP', numattempts=200, waittime=10):
urlstart = 'http://oasis.caiso.com/oasisapi/GroupZip?'
columnsout = [
'INTERVALSTARTTIME_GMT', 'NODE', 'MW',
'OPR_DT', 'OPR_HR', 'OPR_INTERVAL']
if market in ['RTM', 'HASP', 'RTPD']:
interval = pd.Timedelta('1H')
elif market in ['DAM', 'RUC']:
interval = pd.Timedelta('1D')
starttimestamp = pd.Timestamp(start)
endtimestamp = starttimestamp + interval
startdatetime = '{}{:02}{:02}T{:02}:00-0000'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day, starttimestamp.hour)
enddatetime = '{}{:02}{:02}T{:02}:00-0000'.format(
endtimestamp.year, endtimestamp.month,
endtimestamp.day, endtimestamp.hour)
if interval == pd.Timedelta('1D'):
fileout = '{}{:02}{:02}.gz'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day)
elif interval == pd.Timedelta('1H'):
fileout = '{}{:02}{:02}T{:02}.gz'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day, starttimestamp.hour)
url = constructquery(
urlstart,
groupid='{}_LMP_GRP'.format(market),
startdatetime=startdatetime,
enddatetime=enddatetime,
version=1,
resultformat=6)
attempts = 0
while attempts < numattempts:
try:
# if product.lower() in ['mcc', 'mce', 'mcl']:
# if (market.upper() in ['DAM', 'RUC']) and (starttimestamp.year >= 2016):
# if market.upper() in ['DAM', 'RUC']:
if ((product.lower() in ['mcc', 'mce', 'mcl'])
or ((market == 'DAM') and product.lower() == 'lmp')):
zip_file = zipfile.ZipFile(io.BytesIO(
urllib.request.urlopen(url).read()))
for csv_file in zip_file.infolist():
if csv_file.filename.endswith(
'{}_v1.csv'.format(product.upper())):
df = pd.read_csv(zip_file.open(csv_file.filename))
else:
df = pd.read_csv(url, compression='zip')
dfout = df[df['LMP_TYPE'] == product.upper()][columnsout]
dfout.to_csv(
'{}{}'.format(filepathout, fileout),
columns=columnsout,
index=False,
compression='gzip')
return dfout
except (
URLError, IncompleteRead, pd.errors.ParserError,
BadZipFile, KeyError, HTTPError, UnboundLocalError) as error:
print(
'Error for {} on attempt {}/{}: {}'.format(
start, attempts, numattempts, error),
# end='\r',
)
attempts += 1
time.sleep(waittime)
if attempts >= numattempts:
raise URLError('{}{}'.format(filepathout, fileout))
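# Hedged example (date and output path are illustrative): one day of CAISO day-ahead
# LMPs for all nodes; DAM uses a one-day window, so `start` is a plain date.
def _caiso_allnodes_sketch():
    return download_caiso_lmp_allnodes(
        market='DAM', start='2016-06-01', filepathout='/tmp/caiso-da/', product='LMP')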
def download_lmps(year, iso, market, overwrite=False, sleeptime=60,
product='LMP', submarket=None, numattempts=200, subset=None,
waittime=10, filepath=None):
"""
Inputs
------
subset: None or slice()
Notes
-----
* ERCOT LMPs more than 30 days old must be requested from ERCOT.
Requests can be filed at http://www.ercot.com/about/contact/inforequest.
Files should be placed in the folder
revmpath + 'ERCOT/in/lmp/{}/{}/'.format(market, year)
where year is the year of the timestamp within the files.
Note that the date in the filename for day-ahead LMPs is the date before
the timestamps within the file: for example, file
('cdr.00012328.0000000000000000.20151231.125905514.DAMHRLMPNP4183_csv')
contains timestamps for 20160101, and should be placed in the 2016 folder.
"""
### Normalize inputs
iso = iso.upper()
market = market.lower()
year = int(year)
assert market in ['da', 'rt']
assert iso in ['CAISO', 'MISO', 'PJM', 'NYISO', 'ISONE']
### Set file structure
if filepath is None:
filepath = revmpath+'{}/in/lmp/{}/'.format(iso, market)
if not os.path.exists(filepath): os.makedirs(filepath)
### Adjust inputs for different isos
urlstart = {
'ISONE': {
'da': 'https://www.iso-ne.com/static-transform/csv/histRpts/da-lmp/WW_DALMP_ISO_',
'rt': 'https://www.iso-ne.com/static-transform/csv/histRpts/rt-lmp/lmp_rt_final_'},
'MISO': {
# 'da': 'https://old.misoenergy.org/Library/Repository/Market%20Reports/',
# 'rt': 'https://old.misoenergy.org/Library/Repository/Market%20Reports/',
'da': 'https://docs.misoenergy.org/marketreports/',
'rt': 'https://docs.misoenergy.org/marketreports/',
},
'PJM': {
'da': 'http://www.pjm.com/pub/account/lmpda/',
'rt': 'http://www.pjm.com/pub/account/lmp/'},
'NYISO': {
'da': 'http://mis.nyiso.com/public/csv/damlbmp/',
'rt': 'http://mis.nyiso.com/public/csv/realtime/'},
}
urlend = {
'ISONE': {'da': '.csv', 'rt': '.csv'},
'MISO': {'da': '_da_lmp.csv', 'rt': '_rt_lmp_final.csv'},
'PJM': {'da': '-da.zip', 'rt': '.zip'},
'NYISO': {'da': 'damlbmp_gen_csv.zip', 'rt': 'realtime_gen_csv.zip'},
}
files = {
'ISONE': pvvm.toolbox.makedays(year),
'MISO': pvvm.toolbox.makedays(year),
'PJM': pvvm.toolbox.makedays(year),
'NYISO': ['{}{:02}01'.format(year, month) for month in range(1,13)]
}
### Download files
if iso == 'ISONE':
download_file_series(
urlstart=urlstart[iso][market], urlend=urlend[iso][market],
fileseries=files[iso], filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'MISO':
urls = [(urlstart[iso][market] + file + '_da_expost_lmp.csv')
if (int(file) >= 20150301) and (market == 'da')
else (urlstart[iso][market] + file + urlend[iso][market])
for file in files[iso]]
download_file_series(
urlstart='', urlend='', fileseries=urls, filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'PJM':
da_updated = {
'20151201': '-da_updated.zip',
'20150930': '-da_updated.zip',
'20140617': '-da_updated.zip',
'20150616': '-da_updated.zip',
'20150615': '-da_updated.zip',
'20150614': '-da_updated.zip',
'20140613': '-da_updated.zip',
'20150603': '-da_updated.zip',
'20150602': '-da_updated.zip',
'20150601': '-da_updated.zip',
'20150409': '-da_updated.zip',
'20140327': '-da_updated.zip',
'20111012': '-da_update.zip',
'20111011': '-da_update.zip',
}
rt_updated = {
'20170116': '_updated.zip',
'20170115': '_updated.zip',
'20170114': '_updated.zip',
'20170113': '_updated.zip',
'20160923': '_updated.zip',
'20160417': '_updated.zip',
'20160416': '_updated.zip',
'20160415': '_updated.zip',
'20151110': '_updated.zip',
'20150929': '_updated.zip',
'20150901': '_updated.zip',
'20150831': '_updated.zip',
'20150601': '_updated.zip',
'20150504': '_updated.zip',
'20150427': '_updated.zip',
'20150407': '_updated.zip',
'20150310': '_updated.zip',
'20150309': '_updated.zip',
'20150201': '_updated.zip',
'20150131': '_updated.zip',
'20150130': '_updated.zip',
'20141112': '_updated.zip',
'20141023': '_updated.zip',
'20141013': '_updated.zip',
'20140805': '_updated.zip',
'20140710': '_updated.zip',
'20140507': '_updated.zip',
'20140128': '_updated.zip',
'20131125': '_updated.zip',
'20131120': '_updated.zip',
'20130424': '_updated.zip',
'20130307': '_updated.zip',
'20121109': '_updated.zip',
'20121023': '_updated.zip',
'20121004': '_updated.zip',
'20121003': '_updated2.zip',
'20121001': '_updated.zip',
'20110914': '_updated.zip',
'20110829': '_updated.zip',
'20110617': '_updated.zip',
'20110306': '_updated.zip',
'20110305': '_updated.zip',
'20110304': '_updated.zip',
'20101005': '_updated.zip',
'20100526': '_updated.zip',
'20100201': '_updated.zip',
'20100129': '_updated.zip',
'20100125': '_updated.zip',
'20080904': '_updated.zip',
'20080413': '_updated.zip',
'20080305': '_updated.zip',
'20080215': '_updated.zip',
'20080214': '_updated.zip',
'20071002': '_updated.zip',
'20070822': '_updated.zip',
}
if market == 'da':
# print("Download 'updated' files from http://www.pjm.com/markets-and-operations/"
# "energy/day-ahead/lmpda.aspx and replace the files of the corresponding date"
# "downloaded here")
# ### Files switch from .zip to .csv on 20171109 for day-ahead
# urls = [(urlstart[iso][market] + file + '-da.csv')
# if int(file) >= 20171109
# else (urlstart[iso][market] + file + '-da.zip')
# for file in files[iso]]
# ^ Out of date; files have been reposted as zips (20180621)
urls = [(urlstart[iso][market] + file + da_updated[file])
if file in da_updated.keys()
else (urlstart[iso][market] + file + '-da.zip')
for file in files[iso]]
elif market == 'rt':
# print("Download 'updated' files from http://www.pjm.com/markets-and-operations/"
# "energy/real-time/lmpda.aspx and replace the files of the corresponding date"
# "downloaded here")
# ### Files switch from .zip to .csv on 20171212 for real-time
# urls = [(urlstart[iso][market] + file + '.csv')
# if int(file) >= 20171212
# else (urlstart[iso][market] + file + '.zip')
# for file in files[iso]]
# ^ Out of date; files have been reposted as zips (20180621)
urls = [(urlstart[iso][market] + file + rt_updated[file])
if file in rt_updated.keys()
else (urlstart[iso][market] + file + '.zip')
for file in files[iso]]
download_file_series(
urlstart='', urlend='', fileseries=urls, filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'NYISO':
### NYISO files are zipped by month; put them in a separate folder
zippath = '{}/in/lmp/{}-zip/'.format(iso, market)
if not os.path.exists(zippath): os.makedirs(zippath)
download_file_series(
urlstart=urlstart[iso][market], urlend=urlend[iso][market],
fileseries=files[iso], filepath=zippath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
### Unzip files
zips = [(zippath + file + urlend[iso][market]) for file in files[iso]]
for i in trange(len(zips)):
zip_ref = zipfile.ZipFile(zips[i], 'r')
zip_ref.extractall(filepath)
zip_ref.close()
elif iso == 'CAISO':
if (submarket == None) and (market == 'rt'): submarket = 'RTM'
elif (submarket == None) and (market == 'da'): submarket = 'DAM'
if submarket in ['RTM', 'HASP', 'RTPD']:
interval = pd.Timedelta('1H')
elif submarket in ['DAM', 'RUC']:
interval = pd.Timedelta('1D')
### Set output filepath
filepath = '{}/in/{}/{}/'.format(iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepath = '{}/in/{}/{}/{}/'.format(
iso, product.lower(), market, submarket)
if not os.path.exists(filepath): os.makedirs(filepath)
queries = pd.date_range(
start=pd.Timestamp('{}-01-01T00:00'.format(year)),
end=(pd.Timestamp('{}-01-01T00:00'.format(year+1)) - interval),
freq=interval)
### Initialize error container and subset if necessary
errors = []
if subset == None: subset = slice(None)
# already_downloaded = glob('{}{}*'.format(filepath, year))
for query in tqdm(queries[subset]):
# if '{}{}.gz'.format(filepath, stampify(query)) not in already_downloaded:
if interval == pd.Timedelta('1D'):
fileout = stampify(query)[:-3]
elif interval == pd.Timedelta('1H'):
fileout = stampify(query)
if not os.path.exists('{}{}.gz'.format(filepath, fileout)):
# if overwrite == False:
# if os.path.exists('{}{}.gz'.format(filepath, stampify(query))):
# break
try:
download_caiso_lmp_allnodes(
market=submarket, start=str(query), filepathout=filepath,
product=product, numattempts=numattempts, waittime=waittime)
except (URLError, IncompleteRead, pd.errors.ParserError,
BadZipFile, HTTPError) as error:
errors.append(error)
print(error)
if len(errors) > 0:
pd.Series(errors).to_csv(
'{}__Errors__{}.csv'.format(filepath, time.strftime('%Y%m%dT%H%M%S')),
index=False)
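# Hedged examples of the download entry point (years are arbitrary illustrations):
# NYISO day-ahead LMPs land under revmpath + 'NYISO/in/lmp/da/', and the CAISO call
# defaults to the DAM submarket.
def _download_lmps_sketch():
    download_lmps(2017, 'NYISO', 'da')
    download_lmps(2017, 'CAISO', 'da', product='LMP')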
################
### NODALIZE ###
def nodalize(year, market, iso,
filepathin=None, filepathout=None, nodesfile=None,
product='LMP', submarket=None, fillmissinghour=True):
"""
"""
### Set defaults if necessary
if iso.upper() == 'CAISO':
if filepathin == None:
filepathin = revmpath+'{}/in/{}/{}/'.format(
iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepathin = revmpath+'{}/in/{}/{}/{}/'.format(
iso, product.lower(), market, submarket)
if filepathout == None:
filepathout = revmpath+'{}/io/{}-nodal/{}/'.format(
iso, product.lower(), market)
if (market == 'rt') and (submarket == 'RTM'):
filepathout = revmpath+'{}/io/{}-nodal/{}-month/'.format(
iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepathout = revmpath+'{}/io/{}-nodal/{}/{}/'.format(
iso, product.lower(), market, submarket)
if (submarket == None) and (market == 'rt'): submarket = 'RTM'
elif (submarket == None) and (market == 'da'): submarket = 'DAM'
elif iso.upper() == 'ERCOT':
if (filepathin == None) and (market == 'da'):
filepathin = revmpath+'{}/in/lmp/{}/{}/'.format(iso, market, year)
if (filepathout == None) and (market == 'rt'):
filepathout = revmpath+'{}/io/lmp-nodal/{}-month/'.format(iso, market)
elif filepathout == None:
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
else:
if filepathin == None:
filepathin = revmpath+'{}/in/lmp/{}/'.format(iso, market)
if filepathout == None:
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
### Make output folders if necessary
if not os.path.exists(filepathout):
os.makedirs(filepathout, exist_ok=True)
if not os.path.exists(revmpath+'{}/io/missingnodes/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/missingnodes/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/datatimes/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/datatimes/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/fulltimenodes/year/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/fulltimenodes/year/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/fulltimenodes/day/{}/'.format(iso.upper(), market)):
os.makedirs(revmpath+'{}/io/fulltimenodes/day/{}/'.format(iso.upper(), market),
exist_ok=True)
print(filepathout)
### Shared components
nodesfiles = {
'CAISO': revmpath+'CAISO/io/caiso-node-latlon.csv',
'ERCOT': revmpath+'ERCOT/io/ercot-node-latlon.csv',
#'MISO': revmpath+'MISO/in/miso-node-map.csv',
'MISO': revmpath+'MISO/io/miso-node-latlon.csv',
# 'PJM': revmpath+'PJM/io/pjm-pnode-latlon-uniquepoints.csv',
'PJM': revmpath+'PJM/io/pjm-node-latlon.csv',
'NYISO': revmpath+'NYISO/io/nyiso-node-latlon.csv',
'ISONE': revmpath+'ISONE/io/isone-node-latlon.csv'
}
if nodesfile is None:
nodesfile = nodesfiles[iso]
resolution = {
'CAISO': {'da': 60, 'rt': 5}, 'ERCOT': {'da': 60, 'rt': 5},
'MISO': {'da': 60, 'rt': 60}, 'PJM': {'da': 60, 'rt': 60},
'NYISO': {'da': 60, 'rt': 5}, 'ISONE': {'da': 60, 'rt': 60},
}
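### (The resolution values above are the LMP time step in minutes: hourly for
### all day-ahead markets, 5-minute for CAISO/ERCOT/NYISO real-time.)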
### Get file list and iso/market info
# files = glob('{}{}*'.format(filepathin, year))
files = sorted(glob('{}{}*'.format(filepathin, year)))
print('head(files):')
for file in files[:3]:
print(file)
print('tail(files):')
for file in files[-3:]:
print(file)
timezone = pvvm.toolbox.tz_iso[iso]
res = resolution[iso][market]
### Make the inputs easier to work with
iso = iso.upper()
hours = pvvm.toolbox.yearhours(year)
dates = pvvm.toolbox.makedays(year)
### TODO: figure out how to generalize this
# if len(files) != len(dates):
# print('len(files) = {}'.format(len(files)))
# print('len(dates) = {}'.format(len(dates)))
# raise Exception("files and dates don't match")
if iso == 'ISONE':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True,
names=['Node'], skiprows=1)
### Load daily files
colnames = ['intime', 'node', 'lmp']
dfdict = {}
for i in trange(len(files)):
dfday = pd.read_csv(
files[i], skiprows=6, usecols=[2,4,6], names=colnames,
dtype={'intime':str, 'node':'category', 'lmp':float})
dfday.drop(dfday.index[-1], inplace=True)
dfday.loc[:,'intime'] = dates[i] + 'H' + dfday.loc[:,'intime']
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
### Make new index
oldtime = list(dfall.intime.unique())
newtime = list(pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year)))
for i in range(len(newtime)):
newtime[i] = str(newtime[i])
indexconvert = dict(zip(oldtime, newtime))
dfall.loc[:,'intime'] = dfall.loc[:,'intime'].apply(
lambda x: indexconvert[x])
dfall.loc[:,'intime'] = pd.to_datetime(dfall['intime'])
fullindex = pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year))
fullindex = fullindex.tz_localize(timezone)
fullindex = pd.DataFrame(index=fullindex)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = dfall[dfall['node'] == nodesin[j]][['intime','lmp']].copy()
df.index = df['intime'].values
del df['intime']
df.index = df.index.tz_localize(timezone)
df = df.merge(fullindex, how='right', left_index=True, right_index=True)
numhours = hours - len(df[df['lmp'].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'MISO':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['Node'])
### Pick columns from input file
usecols = [0, 2,
3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,
15,16,17,18,19,20,21,22,23,24,25,26]
### Load daily files
dfdict = {}
for i in trange(len(files)):
colnames = ['Node', 'Value']
for j in range(24):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=5, header=None,
usecols=usecols,
dtype={0: 'category'}, names=colnames)
dfday = dfin.loc[dfin['Value'] == 'LMP'].T.copy()
dfday.columns = dfday.iloc[0,:]
dfday = dfday.drop(dfday.index[[0,1]])
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
dfall.index = dfall.index.droplevel(0)
dfall.index = pd.date_range(dates[0], periods=hours, freq='H')
dfall.index = dfall.index.tz_localize(timezone)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = pd.DataFrame(dfall.loc[:,nodesin[j]])
numhours = hours - len(df[df[nodesin[j]].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'PJM':
### Set skiprows (different headers for 'da' and 'rt' markets)
skiprows = {'da': 8, 'rt': 18}
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True)
### Pick columns from input file
usecols = [1,
7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76]
usecols_dst_springforward = [1,
7, 10, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76]
usecols_dst_fallback = [1,
7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76, 79]
### Load daily files
dfdict = {}
for i in trange(len(files)):
colnames = ['PnodeID']
if dates[i] not in [pvvm.toolbox.dst_springforward[year], pvvm.toolbox.dst_fallback[year]]:
for j in range(24):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols,
dtype={1: 'category'}, names=colnames)
elif dates[i] == pvvm.toolbox.dst_springforward[year]:
for j in range(23):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols_dst_springforward,
dtype={1: 'category'}, names=colnames)
elif dates[i] == pvvm.toolbox.dst_fallback[year]:
for j in range(25):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols_dst_fallback,
dtype={1: 'category'}, names=colnames)
dfday = dfin.T.copy()
dfday.columns = dfday.iloc[0,:]
dfday = dfday.drop(dfday.index[[0]])
del dfday[np.nan]
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
dfall.index = dfall.index.droplevel(0)
dfall.index = pd.date_range(dates[0], periods=hours, freq='H')
dfall.index = dfall.index.tz_localize(timezone)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = pd.DataFrame(dfall.loc[:,nodesin[j].astype(str)])
numhours = hours - len(df[df[nodesin[j].astype(str)].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv(filepathout + '{}-{}.gz'.format(nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'NYISO':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['node'], skiprows=1)
if market == 'da':
dates = pvvm.toolbox.makedays(year)
if len(files) != len(dates):
print('len(files) = {}'.format(len(files)))
print('len(dates) = {}'.format(len(dates)))
raise Exception("files and dates don't match")
### Make daylight savings mangler
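### (The raw files repeat the 01:00 rows on the fall-back day; the helper
### below appends ' DST' to one of the two duplicated 01:00 blocks so that
### both hours map to distinct keys in indexconvert.)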
def dstfallback(dataframe):
fallback = pvvm.toolbox.dst_fallback[year]
backfall = '{}/{}/{}'.format(fallback[4:6], fallback[6:], fallback[:4])
fallbackhalf = int(len(dataframe[dataframe['intime'] == backfall + ' 01:00'])/2)
if str(dataframe[dataframe['intime'] == backfall + ' 01:00'].iloc[0,1]) != \
str(dataframe[dataframe['intime'] == backfall + ' 01:00'].iloc[fallbackhalf,1]):
raise Exception("DST fallback ptid's don't match.")
mask = dataframe['intime'] == backfall + ' 01:00'
mask.iloc[fallbackhalf:2*fallbackhalf] = False
dataframe.loc[mask, 'intime'] = backfall + ' 01:00 DST'
print("DST fallback conversion worked!")
return dataframe
### Make datetime converter
def makeindexconvert(files, dates):
"""
"""
dicttimes = {}
for i in trange(len(files)):
df = pd.read_csv(files[i],
usecols = [0,2,3], skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={'node': 'category', 'lmp': float})
if dates[i] == pvvm.toolbox.dst_fallback[year]:
# print(df.head())
df = dstfallback(df)
dicttimes[dates[i]] = df
dftimes = pd.concat(dicttimes, copy=False)
oldtime = list(dftimes.intime.unique())
print('len(oldtime) = {}'.format(len(oldtime)))
newtime = list(pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year)))
print('len(newtime) = {}'.format(len(newtime)))
for i in range(len(newtime)):
newtime[i] = str(newtime[i])
indexconvert = dict(zip(oldtime, newtime))
return indexconvert
indexconvert = makeindexconvert(files, dates)
### Load daily files
dfdict = {}
for i in trange(len(files)):
dfday = pd.read_csv(files[i],
usecols = [0,2,3], skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={'node': 'category', 'lmp': float})
if dates[i] == pvvm.toolbox.dst_fallback[year]:
dfday = dstfallback(dfday)
dfday.loc[:,'intime'] = dfday.loc[:,'intime'].apply(lambda x: indexconvert[x])
dfday.loc[:,'intime'] = pd.to_datetime(dfday['intime'])
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
### copy=False is experimental
dfall = pd.concat(dfdict, copy=False)
### Change node type to 'category'. SUPER important. >10x speedup.
dfall['node'] = dfall['node'].astype('category')
### Make new index
fullindex = pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year))
fullindex = fullindex.tz_localize(timezone)
fullindex = pd.DataFrame(index=fullindex)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
fulldaynodes = {}
for j in trange(len(nodesin)):
# for j in trange(20):
node = str(nodesin[j])
try:
df = dfall[dfall['node'] == nodesin[j]][['intime','lmp']].copy()
df.index = df['intime'].values
del df['intime']
df.index = df.index.tz_localize(timezone)
df = df.merge(fullindex, how='right', left_index=True, right_index=True)
## Record datapoints
numhours = hours - len(df[df['lmp'].isnull()])
datalength.append([nodesin[j], numhours])
## Determine full-data days
dfcount = df.groupby([df.index.month, df.index.day]).count()
for date in dates:
month = int(date[4:6])
day = int(date[6:])
count = dfcount.loc[month].loc[day][0]
if count == 24:
nodes = fulldaynodes.get(date, [])
nodes.append(node)
fulldaynodes[date] = nodes
## Write nodalized file
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif market == 'rt':
datesprev = pvvm.toolbox.makedays(year - 1)
datesthis = pvvm.toolbox.makedays(year)
dates = [datesprev[-1]] + datesthis
filesprev = sorted(glob('{}{}*'.format(filepathin, (year - 1))))
filesthis = sorted(glob('{}{}*'.format(filepathin, year)))
files = [filesprev[-1]] + filesthis
if len(files) != len(dates):
print('len(files) = {}'.format(len(files)))
print('len(dates) = {}'.format(len(dates)))
for date in dates:
if date not in [file[88:96] for file in files]:
print(date)
raise Exception("files and dates don't match")
### Make nice index
niceindex_hourstart = pd.date_range(
start='{}-01-01 00:00'.format(year),
periods = hours * 12,
freq = '5T',
tz=pvvm.toolbox.tz_iso[iso])
niceindex = pd.DataFrame(index=niceindex_hourstart)
### Load daily files
dfdict = {}
for i in trange(len(files)):
df = pd.read_csv(
files[i],
usecols=[0,2,3],
skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={'intime': 'category',
'node': 'category',
'lmp': float},
parse_dates=['intime'],
infer_datetime_format=True)
dfdict[dates[i]] = df
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict, copy=False)
### Change node type to 'category'. SUPER important. >10x speedup.
dfall['node'] = dfall['node'].astype('category')
### Check number of nodes. Good for error checking.
numnodes = len(dfall['node'].unique())
print("len(dfall['node']): {}".format(numnodes))
### Reset index
dfall.index = dfall['intime'].values
dfall.index = dfall.index.tz_localize(pvvm.toolbox.tz_iso[iso])
### Fix DST
dststart = dfall.index.get_loc(pvvm.toolbox.dst_springforward[year] + ' 01:55')
print('len(dststart) = {}'.format(len(dststart)))
print('num nodes = {}'.format(numnodes))
if len(dststart) > numnodes:
raise Exception('len(dststart) > numnodes')
dststart = dststart[-1] + 1
if year == 2012:
dstend = dfall.index.get_loc(pvvm.toolbox.dst_fallback[year] + ' 01:59:34')
else:
dstend = dfall.index.get_loc(pvvm.toolbox.dst_fallback[year] + ' 01:55')
print('len(dstend) = {}'.format(len(dstend)))
if year == 2012:
if len(dstend) > numnodes:
raise Exception('len(dststart) > numnodes')
dstend = dstend[-1]
else:
if len(dstend) % 2 != 0:
raise Exception('len(dstend) % 2 != 0')
if len(dstend) / 2 > numnodes:
raise Exception('len(dstend) / 2 > numnodes')
if ((dstend[int(len(dstend)/2) + 0] - dstend[int(len(dstend)/2) - 1] - 1) / 11
!= (len(dstend) / 2)):
print((dstend[int(len(dstend)/2) + 0] - dstend[int(len(dstend)/2) - 1] - 1) / 11)
print(len(dstend) / 2)
raise Exception('node added or lost during DST fallback')
dstend = dstend[int(len(dstend)/2) - 1]
dfall.iloc[dststart:(dstend + 1),0] = (
dfall.iloc[dststart:(dstend + 1),0]
+ pd.Timedelta(-1, unit='h'))
### Reset index
dfall.index = dfall['intime'].values
dfall.index = dfall.index.tz_localize(pvvm.toolbox.tz_iso[iso])
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
fulldaynodes = {}
for j in trange(len(nodesin)):
node = str(nodesin[j])
try:
dfin = dfall[dfall['node'] == node].copy()
## Add missing timestamps
df = dfin.merge(
niceindex,
how='outer',
left_index=True, right_index=True)
## Fill gaps, using off-5T values
df = df['lmp'].interpolate(method='time', limit=11)
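## (limit=11 caps each interpolated gap at 11 consecutive missing values,
## i.e. just under one hour at 5-minute resolution)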
## Remove off-5T values
dfout = pd.DataFrame(df).merge(
niceindex,
how='right',
left_index=True, right_index=True)
## Fill missing hour if desired
if fillmissinghour:
dfout = dfout.interpolate('linear', limit=12)
## Record datapoints
numpoints = dfout.notnull().sum().values[0]
datalength.append([nodesin[j], numpoints])
## Determine full-data days
dfcount = dfout.groupby([dfout.index.month, dfout.index.day]).count()
for date in dates[1:]:
month = int(date[4:6])
day = int(date[6:])
count = dfcount.loc[month].loc[day][0]
if count == 288:
nodes = fulldaynodes.get(date, [])
nodes.append(node)
fulldaynodes[date] = nodes
## Write nodalized file
dfout.to_csv(
'{}{}-{}.gz'.format(
filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(node)
continue
elif iso == 'CAISO':
if market == 'da':
### Input housekeeping
filesin = sorted(glob('{}{}*'.format(filepathin, year)))
datesin = pvvm.toolbox.makedays(year)
if len(filesin) != len(datesin):
print('filepathin = {}'.format(filepathin))
print('len(filesin) = {}'.format(len(filesin)))
print('len(datesin) = {}'.format(len(datesin)))
raise Exception("files and dates don't match")
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True,
names=['Node'], skiprows=1)
### Make nice hourly index
hourlyindex = pd.date_range(
start='{}-01-01 00:00'.format(year),
end='{}-12-31 23:00'.format(year),
freq = '1H',
tz=pvvm.toolbox.tz_iso[iso])
hourlyindex = pd.DataFrame(index=hourlyindex)
### Make nice daily index
dailyindex = pd.date_range(
start='{}-01-01'.format(year),
end='{}-12-31'.format(year),
freq='1D')
### Load daily files
dfdict = {}
for i in trange(len(filesin)):
if ((product == 'lmp') and (market == 'da')):
df = pd.read_csv(
filesin[i],
usecols=[1,2,3],
skiprows=1,
names=['node', 'intime', product],
dtype={'intime':'category', 'node':'category', product:float},
# index_col='intime',
parse_dates=['intime'],
infer_datetime_format=True
)
# df.intime = df.intime.map(
# lambda x: pd.to_datetime('{}{}{} {}:00'.format(x[:4], x[5:7], x[9:11], x[12:14])))
else:
df = pd.read_csv(
filesin[i],
usecols=[0,1,2],
skiprows=1,
names=['intime', 'node', product],
dtype={'intime':'category', 'node':'category', product:float},
parse_dates=['intime'],
infer_datetime_format=True
)
dfdict[datesin[i]] = df
### Concat into one dataframe
dfall = pd.concat(dfdict, copy=False)
# dfall.reset_index(level=0, drop=True, inplace=True)
### Categorize nodes (accelerates lookup)
dfall['node'] = dfall['node'].astype('category')
### Check number of nodes. Good for error checking.
numnodes = len(dfall['node'].unique())
print("numnodes = {}".format(numnodes))
### Reset index and set to local timezone
dfall.index = dfall['intime'].values
dfall.index = (
dfall.index
.tz_localize('UTC')
.tz_convert(pvvm.toolbox.tz_iso[iso]))
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
fulldaynodes = {}
for j in trange(len(nodesin)):
node = str(nodesin[j])
try:
dfin = dfall[dfall['node'] == node].copy()
## Add missing timestamps
df = dfin.merge(
hourlyindex,
how='right',
left_index=True, right_index=True)
df = pd.DataFrame(df[product])
## Record datapoints
numpoints = df.notnull().sum().values[0]
datalength.append([nodesin[j], numpoints])
## Determine full-data days
dfcount = df.groupby([df.index.month, df.index.day]).count()
for date in dailyindex:
month = date.month
day = date.day
count = dfcount.loc[month].loc[day][0]
if count == 24:
nodes = fulldaynodes.get(date.strftime('%Y%m%d'), [])
nodes.append(node)
fulldaynodes[date.strftime('%Y%m%d')] = nodes
## Write nodalized file
## ONLY if it contains data
if df.notnull().sum()[0] > 0:
df.to_csv(
'{}{}-{}.gz'.format(
filepathout, node, year),
compression='gzip', header=False)
else:
missingnodes.append(node)
except KeyError:
missingnodes.append(node)
elif market == 'rt':
### Make convenience variables
months = list(range(1,13))
### Load file containing nodes with geographic information
nodesin = list(pd.read_csv(
nodesfile,
usecols=[0],
squeeze=True
))
### Loop over months
for month in months:
datetimesin = pd.date_range(
start='{}{:02}01T{:02}:00'.format(year, month, abs(pvvm.toolbox.timezone_iso[iso])),
periods = pvvm.toolbox.monthhours(year, month),
freq = 'H')
files = ['{}{}.gz'.format(filepathin, d.strftime('%Y%m%dT%H')) for d in datetimesin]
### Make nice MONTHLY index
niceindex = pd.date_range(
start='{}-{:02}-01 00:00'.format(year, month),
periods = (pvvm.toolbox.monthhours(year, month) * 60 / res),
freq = '5T',
tz=pvvm.toolbox.tz_iso[iso])
niceindex = pd.DataFrame(index=niceindex)
### Make date index (for labeling daily output files)
dates = pd.date_range(
start = '{}-{:02}-01'.format(year, month),
periods = (pvvm.toolbox.monthhours(year, month) / 24),
freq = '1D')
dates = [date.strftime('%Y%m%d') for date in dates]
### Load daily files
dfdict = {}
for i in trange(len(files)):
df = pd.read_csv(
files[i],
usecols=[0,1,2],
skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={
'intime': 'category',
'node': 'category',
'lmp': float},
parse_dates=['intime'],
infer_datetime_format=True)
dfdict[datetimesin[i]] = df
### Concat into one dataframe
dfall = pd.concat(dfdict, copy=False)
### Categorize nodes (accelerates lookup)
dfall['node'] = dfall['node'].astype('category')
### Check number of nodes. Good for error checking.
numnodes = len(dfall['node'].unique())
print("numnodes({:02}) = {}".format(month, numnodes))
### Reset index and set to local timezone
dfall.index = dfall['intime'].values
dfall.index = (
dfall.index
.tz_localize('UTC')
.tz_convert(pvvm.toolbox.tz_iso[iso]))
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
fulldaynodes = {}
for j in trange(len(nodesin)):
node = str(nodesin[j])
try:
dfin = dfall[dfall['node'] == node].copy()
## Add missing timestamps
df = dfin.merge(
niceindex,
how='right',
left_index=True, right_index=True)
df = pd.DataFrame(df['lmp'])
## Record datapoints
numpoints = df.notnull().sum().values[0]
datalength.append([nodesin[j], numpoints])
## Determine full-data days
dfcount = df.groupby([df.index.month, df.index.day]).count()
for date in dates:
day = int(date[6:])
count = dfcount.loc[month].loc[day][0]
if count == 288:
nodes = fulldaynodes.get(date, [])
nodes.append(node)
fulldaynodes[date] = nodes
## Write nodalized file
## ONLY if it contains data
if df.notnull().sum()[0] > 0:
df.to_csv(
'{}{}-{}{:02}.gz'.format(
filepathout, nodesin[j], year, month),
compression='gzip', header=False)
else:
missingnodes.append(node)
except KeyError:
missingnodes.append(node)
#############################
### Unmonthify the nodal lmps
### Set new filepaths
filepathin = revmpath+'{}/io/lmp-nodal/{}-month/'.format(iso, market)
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
if not os.path.exists(filepathout): os.makedirs(filepathout)
### Make list of all files for year
filesin = sorted(glob('{}*-{}??.gz'.format(filepathin, year)))
### Make list of all nodes (with duplicates)
nodes = []
for i in range(len(filesin)):
nodes.append(filesin[i][:filesin[i].find('-{}'.format(year))])
### Make list of unique nodes
uniquenodes = np.unique(np.array(nodes))
### Make dict of monthly files for each node
dictfiles = {}
for node in uniquenodes:
out = []
for file in filesin:
if file.find(node) != -1:
out.append(file)
dictfiles[node] = out
### Load and concat monthly files for each node, then write as yearly csv
for node in tqdm(uniquenodes):
dfdict = {}
for file in dictfiles[node]:
dfin = pd.read_csv(
file,
header=None,
names=['datetime', 'lmp'],
)
dfdict[file[-5:-3]] = dfin
dfyear = pd.concat(dfdict, ignore_index=True)
dfyear.to_csv(
'{}{}-{}.gz'.format(filepathout, node[len(filepathin):], year),
index=False,
header=False,
compression='gzip'
)
elif iso == 'ERCOT':
#############
### Functions
def makeindexconvert(files, dates):
"""
Datetime converter for day-ahead
"""
dicttimes = {}
for i in trange(len(files)):
try:
df = pd.read_csv(files[i], usecols=[0,1,4],
dtype={
'DeliveryDate': 'category',
'HourEnding': 'category',
'DSTFlag': 'category'})
df.loc[:,'DSTFlag'] = (
df.loc[:,'DeliveryDate'].astype(str) +
'H' + df.loc[:,'HourEnding'].astype(str) +
df.loc[:,'DSTFlag'].astype(str)).astype('category')
except ValueError as err:
df = pd.read_csv(files[i], usecols=[0,1],
dtype={
'DeliveryDate': 'category',
'HourEnding': 'category'})
df['DSTFlag'] = (
df['DeliveryDate'].astype(str)
+ 'H' + df['HourEnding'].astype(str)
+ 'N').astype('category')
dicttimes[dates[i]] = df
### copy=False is experimental
dftimes = pd.concat(dicttimes, copy=False)
oldtime = list(dftimes.DSTFlag.unique())
print('len(oldtime) = {}'.format(len(oldtime)))
newtime = list(pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year)))
print('len(newtime) = {}'.format(len(newtime)))
if len(oldtime) != len(newtime):
raise Exception("len(oldtime) and len(newtime) don't match")
for i in range(len(newtime)):
newtime[i] = str(newtime[i])
indexconvert = dict(zip(oldtime, newtime))
return indexconvert
def datetimefromfile(file, clean=False):
"""
Only works for ERCOT bus RTLMP files
Example: ('cdr.00011485.0000000000000000.20101201.005033.'
'LMPSELECTBUSNP6787_20101201_005025_csv.zip')
"""
basename = os.path.basename(file)
file_datetime = basename[65:80]
year = file_datetime[:4]
if year == 'retr':
file_datetime = basename[71:86]
if year == '87_2':
file_datetime = basename[68:83]
if year == '87_r':
file_datetime = basename[74:89]
year = file_datetime[:4]
try:
year = int(year)
except:
print(year)
print(type(year))
print(basename)
raise ValueError
if (year < 2010) or (year > 2017):
print(year)
print(type(year))
print(basename)
raise ValueError
springforward_date = pvvm.toolbox.dst_springforward[year]
fallback_date = pvvm.toolbox.dst_fallback[year]
springforward_time = pd.to_datetime(
'{} 03:00:00'.format(springforward_date))
fallback_time = pd.to_datetime(
'{} 02:00:00'.format(fallback_date))
if basename.find('xhr') == -1:
dst = False
else:
dst = True
if not clean:
datetime_predst = pd.to_datetime(
file_datetime,
format='%Y%m%d_%H%M%S')
elif clean:
datetime_predst = pd.to_datetime(
file_datetime[:-2],
format='%Y%m%d_%H%M')
if (
(datetime_predst >= springforward_time)
& (datetime_predst < fallback_time)
& (not dst)
):
datetime = datetime_predst - pd.Timedelta('1H')
else:
datetime = datetime_predst
return datetime
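### Worked example, using the filename from the docstring above:
### basename[65:80] is '20101201_005025', which parses to
### pd.Timestamp('2010-12-01 00:50:25'); December falls outside DST and the
### name contains no 'xhr', so no one-hour correction is applied.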
def datetimebin(datetime, mod=5):
"""
Take a datetime and determine what bin it falls into.
If mod == 5, then 08:39:42 --> 08:35:00.
"""
assert 60 % mod == 0, "60 must be divisible by mod"
assert type(datetime) == pd.Timestamp, "datetime must be pd.Timestamp"
newminute = int(datetime.minute / mod) * mod
out = pd.Timestamp(
year=datetime.year, month=datetime.month, day=datetime.day,
hour=datetime.hour, minute=newminute)
return out
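### e.g. datetimebin(pd.Timestamp('2017-03-12 10:47'), mod=15)
### returns pd.Timestamp('2017-03-12 10:45:00')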
#############
### Procedure
if market == 'da':
### Make the inputs easier to work with
files = sorted(glob(filepathin + '*csv.zip'))
dates = pvvm.toolbox.makedays(year)
if len(files) != len(dates):
print('filepathin = {}'.format(filepathin))
print('len(files) = {}'.format(len(files)))
print('len(dates) = {}'.format(len(dates)))
raise Exception("files and dates don't match")
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['Node'], skiprows=1)
nodesin.drop_duplicates(inplace=True)
nodesin = list(nodesin)
for i in range(len(nodesin)):
nodesin[i] = nodesin[i].upper()
### Convert datetimes
indexconvert = makeindexconvert(files, dates)
### Load daily files
dfdict = {}
for i in trange(len(files)):
try:
dfday = pd.read_csv(files[i],
dtype={
'DeliveryDate': 'category', 'HourEnding': 'category',
'BusName': 'category', 'LMP': float, 'DSTFlag': 'category'})
dfday.loc[:,'DSTFlag'] = (
dfday.loc[:,'DeliveryDate'].astype(str) +
'H' + dfday.loc[:,'HourEnding'].astype(str) +
dfday.loc[:,'DSTFlag'].astype(str)).astype('category')
except KeyError as err:
dfday = pd.read_csv(files[i],
dtype={
'DeliveryDate': 'category', 'HourEnding': 'category',
'BusName': 'category', 'LMP': float})
dfday['DSTFlag'] = (
dfday['DeliveryDate'].astype(str)
+ 'H' + dfday['HourEnding'].astype(str)
+ 'N').astype('category')
dfday.loc[:,'DSTFlag'] = dfday.loc[:,'DSTFlag'].apply(lambda x: indexconvert[x])
dfday.loc[:,'DSTFlag'] = pd.to_datetime(dfday['DSTFlag'])
del dfday['DeliveryDate']
del dfday['HourEnding']
dfday = dfday.rename(columns={'BusName': 'node', 'DSTFlag': 'intime', 'LMP': 'lmp'})
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
### copy=False is experimental
dfall = pd.concat(dfdict, copy=False)
### Change node type to 'category'. SUPER important. >10x speedup.
dfall['node'] = dfall['node'].astype('category')
# if len(dfall.index.unique()) != pvvm.toolbox.yearhours(year):
# raise Exception("len(dfall.index.unique() != pvvm.toolbox.yearhours(year)")
### Make new index
fullindex = pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year))
fullindex = fullindex.tz_localize(timezone)
fullindex = pd.DataFrame(index=fullindex)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = dfall[dfall['node'] == nodesin[j]][['intime','lmp']].copy()
df.index = df['intime'].values
del df['intime']
df.index = df.index.tz_localize(timezone)
df = df.merge(fullindex, how='right', left_index=True, right_index=True)
numhours = hours - len(df[df['lmp'].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif market == 'rt':
### Set defaults
months = list(range(1,13))
################################
###### Make filekey if necessary
###### It takes a while to make, so save and load if possible
filekey = revmpath+'{}/io/{}-{}-filekey-cleaned-existing.csv'.format(
iso.upper(), iso.lower(), market)
### Check if it exists already and is long enough to contain data through end 2017
if (os.path.exists(filekey)) and (len(pd.read_csv(filekey, usecols=[2])) >= 743827):
pass
else:
### Make list of ALL ERCOT RT LMP files
allyears = range(2010,2051)
files = {}
for i_year in list(allyears):
files[i_year] = sorted(glob('{}{}/*csv.zip'.format(filepathin, i_year)))
files_all = sum([files[i] for i in allyears], [])
### Round all files to start of 5-minute bin
filestodatetimes = {}
for i in trange(len(files_all)):
filestodatetimes[files_all[i]] = datetimefromfile(files_all[i], clean=False)
filetimes = [filestodatetimes[key] for key in filestodatetimes]
dffiles = pd.DataFrame(files_all, columns=['Filename'])
dffiles['Datetime'] = filetimes
dffiles['Retry'] = dffiles.apply(
lambda row: row['Filename'].find('retr') != -1,
axis=1)
dffiles.drop_duplicates(subset='Datetime', keep='last', inplace=True)
datetimes_binned = []
for i in dffiles.index:
try:
datetimes_binned.append(datetimebin(dffiles.loc[i, 'Datetime']))
except:
print(i)
print(dffiles.loc[i, 'Datetime'])
raise TypeError
dffiles['Datetime-Binned'] = datetimes_binned
dffiles.drop_duplicates(subset='Datetime-Binned', keep='first', inplace=True)
dffiles.to_csv(
revmpath+'{}/io/{}-{}-filekey-cleaned-existing.csv'.format(
iso.upper(), iso.lower(), market),
index=False)
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['Node'], skiprows=1)
nodesin.drop_duplicates(inplace=True)
nodesin = list(nodesin)
for i in range(len(nodesin)):
nodesin[i] = nodesin[i].upper()
### Load file with datetime-filename key
dffiles = pd.read_csv(
filekey,
dtype={'Filename': str, 'Retry': bool, 'one': float},
parse_dates=['Datetime', 'Datetime-Binned'],
infer_datetime_format=True
)
dffiles.index = dffiles['Datetime-Binned']
#############
### PROCEDURE
### Set DST switch times
springforward_date = pvvm.toolbox.dst_springforward[year]
fallback_date = pvvm.toolbox.dst_fallback[year]
springforward_time = pd.to_datetime(
'{} 03:00:00'.format(springforward_date))
fallback_time = pd.to_datetime(
'{} 02:00:00'.format(fallback_date))
### Loop over months
for month in months:
print('{}-{:02}'.format(year, month))
### Make nice 5T timestamps for month
monthindex = pd.date_range(
start='{}-{:02}-01 00:00'.format(year, month),
periods = (pvvm.toolbox.monthhours(year, month) * 60 / res),
freq = '5T',
# tz=pvvm.toolbox.tz_iso[iso]
)
monthindex = pd.DataFrame(index=monthindex)
### Make date index (for labeling daily output files)
dates = pd.date_range(
start = '{}-{:02}-01'.format(year, month),
periods = (pvvm.toolbox.monthhours(year, month) / 24),
freq = '1D')
dates = [date.strftime('%Y%m%d') for date in dates]
### Make nice 5T timestamps for year
yearindex = pd.date_range(
start='{}-01-01 00:00'.format(year),
periods = int(pvvm.toolbox.yearhours(year) * 60 / res),
freq = '{}T'.format(res),
# tz=pvvm.toolbox.tz_iso[iso]
)
yearindex = pd.DataFrame(index=yearindex)
### Create list of files to load
dfmonth = dffiles.loc[
(dffiles['Datetime-Binned']
>= (
pd.Timestamp('{}-{:02}-01 00:00'.format(year, month)))
- pd.Timedelta('1H'))
& (dffiles['Datetime-Binned']
<= (
pd.Timestamp('{}-{:02}-01 00:00'.format(year, month)))
+ pd.Timedelta('{}H'.format(pvvm.toolbox.monthhours(year, month) + 1)))
]
filestoload = list(dfmonth['Filename'])
### Load the files
dfdict = {}
badzipfilecount = 0
for i in trange(len(filestoload)):
file = filestoload[i]
datetime = datetimefromfile(file)
try:
df = pd.read_csv(
file, skiprows=1,
usecols=[2,3],
names=['node', 'lmp'],
dtype={'node': 'category', 'lmp': float})
df['datetime'] = datetime
df['datetime'] = df['datetime'].map(datetimebin)
df.index = df['datetime']
dfdict[datetime.strftime('%Y%m%d_%H%M%S')] = df[['node', 'lmp']]
except zipfile.BadZipFile as err:
badzipfilecount += 1
print("zipfile.BadZipFile error number {}".format(badzipfilecount))
print(err)
print(file)
print(datetime)
### Concat into one dataframe
dfall = pd.concat(dfdict, copy=False).reset_index(level=0, drop=True)
### Clear dfdict to conserve memory (?)
dfdict = 0
### Categorize nodes (accelerates lookup)
dfall['node'] = dfall['node'].astype('category')
### Determine missing nodes and data coverage, and save as one-node files
missingnodes, datalength, fulldaynodes = [], [], {}
### v OR could do "for j in trange(len(dfall['node'].unique()"
### v then node = str(uniquenodes[j]), to write all nodes
### v (not just those with geographic information)
for j in trange(len(nodesin)):
node = str(nodesin[j])
try:
dfin = dfall[dfall['node'] == node].copy()
## Add missing timestamps
df = dfin.merge(
monthindex,
how='outer',
left_index=True, right_index=True)
df = pd.DataFrame(df['lmp'])
## For debugging:
## df['interpolated'] = df['lmp'].isnull()
## Fill gaps using linear interpolation
df.interpolate(
method='time',
# limit=12,
# limit_direction='both',
inplace=True)
## Remove off-5T values
dfout = pd.DataFrame(df).merge(
monthindex,
how='right',
left_index=True, right_index=True)
########### Drop Duplicates ############
## Drop duplicates
dfout = dfout.reset_index().drop_duplicates('index').copy()
dfout.index = dfout['index']
dfout = dfout.drop('index', axis=1).copy()
########################################
## Record datapoints
numpoints = dfout.notnull().sum().values[0]
datalength.append([nodesin[j], numpoints])
## Determine full-data days
dfcount = dfout.groupby([dfout.index.month, dfout.index.day]).count()
for date in dates:
day = int(date[6:])
count = dfcount.loc[month].loc[day][0]
if count == 288:
nodes = fulldaynodes.get(date, [])
nodes.append(node)
fulldaynodes[date] = nodes
## Write nodalized file
## ONLY if it contains data
if dfout.notnull().sum()[0] > 0:
dfout.to_csv(
'{}{}-{}{:02}.gz'.format(
filepathout, nodesin[j], year, month),
compression='gzip', header=False)
else:
missingnodes.append(node)
except KeyError:
missingnodes.append(node)
#############################
### Unmonthify the nodal lmps
### Set new filepaths
filepathin = revmpath+'{}/io/lmp-nodal/{}-month/'.format(iso, market)
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
if not os.path.exists(filepathout): os.makedirs(filepathout)
### Make list of all files for year
filesin = sorted(glob('{}*-{}??.gz'.format(filepathin, year)))
print('len(filesin) = {}'.format(len(filesin)))
if len(filesin) == 0:
raise Exception("No files in filepathin")
### Make list of all nodes (with duplicates)
nodes = []
for i in range(len(filesin)):
nodes.append(filesin[i][:filesin[i].find('-{}'.format(year))])
### Make list of unique nodes
uniquenodes = np.unique(np.array(nodes))
print('len(uniquenodes) = {}'.format(len(uniquenodes)))
### Make dict of monthly files for each node
dictfiles = {}
for node in uniquenodes:
out = []
for file in filesin:
if file.find(node) != -1:
out.append(file)
dictfiles[node] = out
### Load and concat monthly files for each node, then write as yearly csv
for node in tqdm(uniquenodes):
dfdict = {}
for file in dictfiles[node]:
dfin = pd.read_csv(
file,
header=None,
names=['datetime', 'lmp'],
parse_dates=['datetime']
)
dfdict[file[-5:-3]] = dfin
dfyear = pd.concat(dfdict, ignore_index=True)
dfyear.index = dfyear.datetime
dfyear.index = dfyear.index.tz_localize(pvvm.toolbox.tz_iso[iso])
dfyear.to_csv(
'{}{}-{}.gz'.format(
filepathout,
node[len(filepathin):],
year),
index_label=False,
header=False,
columns=['lmp'],
compression='gzip'
)
###### Write summary outputs
if (iso in ['CAISO', 'ERCOT'] and (market == 'rt')):
## List of nodes from nodemap that don't have LMP data
pd.Series(missingnodes).to_csv(
revmpath+'{}/io/missingnodes/{}-{}lmp-{}-{}{:02}.csv'.format(
iso.upper(), iso.lower(), market, submarket, year, month),
index=False)
## Intervals (hours or 5-min chunks) of LMP data per node
pd.DataFrame(datalength).to_csv(
revmpath+'{}/io/datatimes/{}-{}lmp-{}-{}{:02}.csv'.format(
iso.upper(), iso.lower(), market, submarket, year, month),
index=False, header=False)
## List of nodes with complete data over year
fulltime = pd.DataFrame(datalength)
fulltime = fulltime[
fulltime[1] == pvvm.toolbox.monthhours(year, month) * 12]
fulltime.to_csv(
### Original
# revmpath+'{}/io/fulltimenodes/year/{}-{}lmp{}-{}{:02}.csv'.format(
### New
revmpath+'{}/io/fulltimenodes/{}-{}lmp{}-{}{:02}.csv'.format(
iso.upper(), iso.lower(), market,
{None:''}.get(submarket,'-'+submarket), year, month),
index=False, header=False, columns=[0])
else:
## List of nodes from nodemap that don't have LMP data
pd.Series(missingnodes).to_csv(
revmpath+'{}/io/missingnodes/{}-{}lmp-missing-{}.csv'.format(
iso.upper(), iso.lower(), market, year),
index=False)
## Intervals (hours or 5-min chunks) of LMP data per node
pd.DataFrame(datalength).to_csv(
revmpath+'{}/io/datatimes/{}-{}lmp-datatimes-{}.csv'.format(
iso.upper(), iso.lower(), market, year),
index=False, header=False)
## List of nodes with complete data over year
fulltime = pd.DataFrame(datalength)
fulltime = fulltime[fulltime[1] == hours * int(60 / res)]  # full year of res-minute intervals
fulltime.to_csv(
### Original
# revmpath+'{}/io/fulltimenodes/year/{}-{}lmp-fulltime-{}.csv'.format(
### New
revmpath+'{}/io/fulltimenodes/{}-{}lmp-fulltime-{}.csv'.format(
iso.upper(), iso.lower(), market, year),
index=False, header=False, columns=[0])
###### Write daily nodecounts
if iso == 'ISONE':
for i in trange(len(dates)):
dfcount = dfall.loc[dates[i]]
dfday = dfcount.groupby(dfcount.node).count()['lmp']
fulltimenodes = list(dfday[dfday == int(24 * 60 / res)].index)
pd.Series(fulltimenodes).to_csv(
revmpath+'{}/io/fulltimenodes/day/{}/{}.csv'.format(
iso.upper(), market, dates[i]),
index=False)
elif (iso in ['MISO', 'PJM']) or ((iso, market) == ('ERCOT', 'da')):
dfcount = dfall
if iso == 'ERCOT': dfcount = dfcount.reset_index(level=0, drop=True)
dfcount = dfcount.groupby([dfcount.index.month, dfcount.index.day]).count().copy()
daterange = pd.date_range(dates[0], periods=int(hours / 24), freq='D')
for i in range(len(daterange)):
dfday = dfcount.loc[daterange[i].month].loc[daterange[i].day].copy()
fulltimenodes = list(dfday[dfday == int(24 * 60 / res)].index)
pd.Series(fulltimenodes).to_csv(
revmpath+'{}/io/fulltimenodes/day/{}/{}.csv'.format(
iso.upper(), market, dates[i]),
index=False)
elif (iso in ['NYISO', 'CAISO']) or ((iso, market) == ('ERCOT', 'rt')):
for date in fulldaynodes:
nodes = fulldaynodes.get(date, [])
pd.Series(nodes).to_csv(
revmpath+'{}/io/fulltimenodes/day/{}/{}.csv'.format(iso.upper(), market, date),
index=False)
##############################
### EXTRACT NODE LOCATIONS ###
def nodelocations_pjm():
"""
"""
### Set up googlemaps
gmaps = googlemaps.Client(key=apikeys['googlemaps'])
### Test if zip code mapping file exists and download if it does not
zipnodefile = revmpath+'PJM/in/zip-code-mapping.xls'
if not os.path.exists(zipnodefile):
url = 'https://www.pjm.com/-/media/markets-ops/energy/lmp-model-info/zip-code-mapping.ashx'
filepathout = revmpath+'PJM/in/zip-code-mapping.xls'
urllib.request.urlretrieve(url, filepathout)
## Make a clean csv version
dfin = pd.read_excel(zipnodefile, skiprows=9, dtype={'Zip Code': 'category'})
dfin['Zip Code'] = dfin['Zip Code'].map(lambda x: '{:>05}'.format(x))
dfin.to_csv(revmpath+'PJM/in/zip-code-mapping.csv', index=False)
## Save the unique zip codes
df = pd.read_csv(revmpath+'PJM/in/zip-code-mapping.csv', dtype={'Zip Code': 'category'})
df['Zip Code'] = df['Zip Code'].map(lambda x: '{:>05}'.format(x))
zips_unique = pd.Series(df['Zip Code'].unique())
zips_unique.to_csv(revmpath+'PJM/io/zips-pjm-unique.csv', index=False)
###### Look up zipcode centers
numattempts = 200
sleeptime = 60
zipcodes = pd.read_csv(
revmpath+'PJM/io/zips-pjm-unique.csv', dtype='category',
names=['zipcode'], squeeze=True)
zipcodes = zipcodes.map(lambda x: '{:>05}'.format(x))
out=[]
for i, zipcode in enumerate(tqdm(zipcodes)):
attempts = 0
while attempts < numattempts:
try:
### Original version only added 'zipcode' for these two zipcodes.
### Now, additional googlemaps queries return erroneous locations,
### so we append 'zipcode' for everything.
# if zipcode in ['15775', '15777']:
# ### These two zipcodes, if queried alone, return locations
# ### outside of the PJM territory
# location = gmaps.geocode('zipcode {}'.format(zipcode))
# else:
# location = gmaps.geocode(zipcode)
location = gmaps.geocode('zipcode {}'.format(zipcode))
### Continue
lat = location[0]['geometry']['location']['lat']
lon = location[0]['geometry']['location']['lng']
out.append([lat, lon])
time.sleep(0.02)
break
except HTTPError as err:
print('Rebuffed for {} on attempt # {} by "{}".'
'Will retry in {} seconds.'.format(
zipcode, attempts, err, sleeptime))
attempts += 1
time.sleep(sleeptime)
if attempts >= numattempts:
raise Exception('Failed on {} after {} attempts'.format(
zipcode, attempts))
dfout = pd.DataFrame(out, columns=['latitude', 'longitude'])
zipgeo = pd.concat([zipcodes, dfout], axis=1)
### NOTE: zip code 45418, when searched in google maps, returns a
### location in Mexico. So look up zip code at
### https://www.unitedstateszipcodes.org/45418/
### and change by hand.
zipgeo.loc[zipgeo.zipcode.astype(str) == '45418', ['latitude', 'longitude']] = (39.69, -84.26)
### Additional error for 25572 fixed by hand
zipgeo.loc[zipgeo.zipcode.astype(str) == '25572', ['latitude', 'longitude']] = (38.16, -81.91)
## Write zipcode coordinates
zipgeo.to_csv(revmpath+'PJM/io/zips-latlon-pjm.csv', index=False)
###### Determine node locations
### Load input files
zips = pd.read_csv(
revmpath+'PJM/io/zips-latlon-pjm.csv',
dtype={'zipcode': 'category'}, index_col='zipcode')
dfnodes = pd.read_csv(revmpath+'PJM/in/zip-code-mapping.csv', dtype='category')
dfnodes['Zip Code'] = dfnodes['Zip Code'].map(lambda x: '{:>05}'.format(x))
dfnodes.PNODEID = dfnodes.PNODEID.astype(int)
dfnodes = dfnodes.drop_duplicates().copy()
dfnodes = dfnodes.merge(zips, left_on='Zip Code', right_index=True, how='left')
pnodeids = list(dfnodes['PNODEID'].sort_values().unique())
zipcodes = list(dfnodes['Zip Code'].unique())
### Put lat, lon in cartesian coordinates (assuming spherical Earth)
dfnodes['x'] = dfnodes.apply(rowlatlon2x, axis=1)
dfnodes['y'] = dfnodes.apply(rowlatlon2y, axis=1)
dfnodes['z'] = dfnodes.apply(rowlatlon2z, axis=1)
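### (rowlatlon2x/y/z are assumed to map each row's latitude/longitude onto the
### unit sphere, i.e. x = cos(lat)*cos(lon), y = cos(lat)*sin(lon), z = sin(lat);
### averaging these and converting back with atan2 below yields the spherical
### centroid of each pnode's zip codes.)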
### Determine centroid of zipcodes listed for each node
lats, lons = [], []
for i, pnode in enumerate(pnodeids):
x = dfnodes[dfnodes['PNODEID'] == pnode]['x'].mean()
y = dfnodes[dfnodes['PNODEID'] == pnode]['y'].mean()
z = dfnodes[dfnodes['PNODEID'] == pnode]['z'].mean()
outlon = math.atan2(y, x) * 180 / math.pi
rho = math.sqrt(x*x + y*y)
outlat = math.atan2(z, rho) * 180 / math.pi
lats.append(outlat)
lons.append(outlon)
### Make output dataframe
dfout = (dfnodes[['PNODEID', 'PNODENAME']]
.drop_duplicates()
.sort_values('PNODEID')
.reset_index(drop=True)
# .rename(columns={col:col.lower() for col in ['PNODEID','PNODENAME']})
.rename(columns={'PNODEID':'node','PNODENAME':'nodename'})
)
dfout['latitude'] = lats
dfout['longitude'] = lons
### Identify duplicate (lat,lon) tuples for NSRDB
latlons = dfout[['latitude', 'longitude']].drop_duplicates().copy()
latlons['latlonindex'] = range(len(latlons))
dfout = dfout.merge(latlons, on=['latitude', 'longitude'], how='left')
dfout.to_csv(revmpath+'PJM/io/pjm-node-latlon.csv', index=False)
latlons[['latlonindex', 'latitude', 'longitude']].to_csv(
revmpath+'PJM/io/pjm-pnode-unique-latlons-for-nsrdb.csv', index=False)
return dfout
def nodelocations_caiso():
### Test if nodemap xml exists and download if it does not
nodemapxml = revmpath+'CAISO/in/GetPriceContourMap.xml'
if not os.path.exists(nodemapxml):
print("Need to download the input file by hand from "
"'http://wwwmobile.caiso.com/Web.Service.Chart/api/v1/ChartService/GetPriceContourMap'"
" and save it at (revmpath + 'CAISO/in/GetPriceContourMap.xml).")
raise Exception("Input file not found")
# ### For some reason this downloads the file in json format. Just do it by hand.
# url = 'http://wwwmobile.caiso.com/Web.Service.Chart/api/v1/ChartService/GetPriceContourMap'
# xmlfile = revmpath+'CAISO/in/GetPriceContourMap.xml'
# urllib.request.urlretrieve(url, xmlfile)
### Import xml nodemap
tree = ET.parse(nodemapxml)
root = tree.getroot()
### Get node names, areas, types, and latlons
names, areas, types, latlonsraw = [], [], [], []
for node in root.iter(tag='{urn:schemas.caiso.com/mobileapp/2014/03}n'):
names.append(node.text)
for node in root.iter(tag='{urn:schemas.caiso.com/mobileapp/2014/03}a'):
areas.append(node.text)
for node in root.iter(tag='{urn:schemas.caiso.com/mobileapp/2014/03}p'):
types.append(node.text)
latlonsraw = []
for node in root.iter(tag='{http://schemas.microsoft.com/2003/10/Serialization/Arrays}decimal'):
latlonsraw.append(float(node.text))
lats = latlonsraw[::2]
lons = latlonsraw[1::2]
### Generate output dataframe
dfout = pd.DataFrame({'node': names, 'latitude': lats, 'longitude': lons,
'area': areas, 'type': types}).sort_values('node')
### Clean up output: Drop nodes with erroneous coordinates
dfclean = dfout.loc[
(dfout.longitude > -180)
& (dfout.longitude < 0)
& (dfout.latitude > 20)
].copy()[['node', 'latitude', 'longitude', 'area', 'type']]
### Write output
dfclean.to_csv(revmpath+'CAISO/io/caiso-node-latlon.csv', index=False)
return dfclean
def nodelocations_miso():
import geopandas as gpd
### Test if nodemap json files exist and download if not
filepaths = {
'Nodes': revmpath + 'MISO/in/MISO_GEN_INT_LZN.json',
'Hubs': revmpath + 'MISO/in/PNODELMPLabels_2.json',
'ReserveZones': revmpath + 'MISO/in/ASMZones_2.json',
'PlanningZones': revmpath + 'MISO/in/Planning_Zones.json',
}
if not os.path.exists(filepaths['Nodes']):
url = ('https://api.misoenergy.org/MISORTWDDataBroker/DataBrokerServices.asmx'
'?messageType=getvectorsource&nodeTypes=GEN,INT,LZN')
# urllib.request.urlretrieve(url, filepaths['Nodes'])
r = requests.get(url, allow_redirects=True)
with open(filepaths['Nodes'], 'wb') as writer:
writer.write(r.content)
if not os.path.exists(filepaths['Hubs']):
url = 'https://api.misoenergy.org/MISORTWD/map/PNODELMPLabels_2.json'
# urllib.request.urlretrieve(url, filepaths['Hubs'])
r = requests.get(url, allow_redirects=True)
with open(filepaths['Hubs'], 'wb') as writer:
writer.write(r.content)
###### GEN, INT, LZN nodes
### Load node file and extract names, types, regions, and locations
with open(filepaths['Nodes']) as f:
data = json.load(f)
dfall = pd.io.json.json_normalize(data)
proj = data['proj']
df = pd.io.json.json_normalize(data['f'])
nodenames = [(i[0]) for i in df.p]
nodetypes = [(i[1]) for i in df.p]
noderegions = [(i[2]) for i in df.p]
nodexy = [tuple(df['g.c'][i]) for i in range(len(df.p))]
###### Original version - now kills the kernel
# ### Convert to lat/lon
# g = gpd.GeoSeries(
# [shapely.geometry.Point(nodexy[i]) for i in range(len(nodexy))])
# g.crs = proj
# gnode = g.to_crs({'init': 'epsg:4326'})
###### New version
import pyproj
import shapely.geometry
latlonproj = pyproj.CRS.from_epsg(4326)
misoproj = pyproj.CRS(proj)
transform = pyproj.Transformer.from_crs(
crs_from=misoproj, crs_to=latlonproj, always_xy=True)
gnode = gpd.GeoSeries(
[shapely.geometry.Point(transform.transform(xy[0], xy[1]))
for xy in nodexy])
### Generate output dataframe
dfout = pd.DataFrame(gnode)
dfout['node'] = nodenames
dfout['latitude'] = gnode.y
dfout['longitude'] = gnode.x
dfout['type'] = nodetypes
dfout['region'] = noderegions
dfout.drop(0, axis=1, inplace=True)
###### HUB nodes
with open(filepaths['Hubs']) as f:
data = json.load(f)
dfall = pd.io.json.json_normalize(data)
proj = data['proj']
df = pd.io.json.json_normalize(data['f'])
hubnames = [(i[0][:-4]) for i in df.p]
hubxy = [tuple(df['g.c'][i]) for i in range(len(df.p))]
###### Original version - now kills the kernel
# ### Convert to lat/lon
# g = gpd.GeoSeries(
# [shapely.geometry.Point(hubxy[i]) for i in range(len(hubxy))])
# g.crs = proj
# ghub = g.to_crs({'init': 'epsg:4326'})
###### New version
latlonproj = pyproj.CRS.from_epsg(4326)
misoproj = pyproj.CRS(proj)
transform = pyproj.Transformer.from_crs(
crs_from=misoproj, crs_to=latlonproj, always_xy=True)
ghub = gpd.GeoSeries(
[shapely.geometry.Point(transform.transform(xy[0], xy[1]))
for xy in hubxy])
### Generate output dataframe
hubout = pd.DataFrame(ghub)
hubout['node'] = [i+'.HUB' for i in hubnames]
hubout['latitude'] = ghub.y
hubout['longitude'] = ghub.x
hubout['type'] = 'Hub'
hubout['region'] = 'MISO'
hubout.drop(0, axis=1, inplace=True)
### Combine and write output dataframes
dfout = pd.concat([dfout, hubout], ignore_index=True)
dfout.to_csv(revmpath + 'MISO/io/miso-node-latlon.csv', index=False)
return dfout
def nodelocations_isone(filepath_input=None):
"""
File must be requested from ISONE
"""
if (filepath_input==None) or (os.path.exists(filepath_input)==False):
print("Example: revmpath + 'ISONE/in/nepnode_lat_long.xlsx'. "
"File can be requested from ISONE at: "
"'https://www.iso-ne.com/participate/support/request-information/'")
raise Exception("Need filename of nepnode_lat_long.xlsx file.")
### Load file, rename columns, and write output
dfin = pd.read_excel(filepath_input, sheet_name='New England')
dfin.rename(
columns={'Node Name': 'node', 'LATITUDE': 'latitude', 'LONGITUDE': 'longitude',
'RSP Area': 'area', 'Dispatch Zone': 'zone', 'Reserve ID': 'reserveid',
'Zone ID': 'zoneid'}, inplace=True)
dfout = dfin[['node', 'latitude', 'longitude', 'area', 'zone', 'reserveid', 'zoneid']]
dfout.to_csv(revmpath+'ISONE/io/isone-node-latlon.csv', index=False)
return dfout
def nodelocations_ercot(filepath_input=None):
"""
http://www.ercot.com/services/rq/imre
https://mis.ercot.com/pps/tibco/mis/Pages/Grid+Information/Long+Term+Planning
'CRR Network Model (Monthly)' > download and unzip one of the available files
and use the filepath as the input value for filepath_input.
"""
### Functions
def latlonify(coordinates):
foo = coordinates.split(' ')
bar = []
for line in foo:
if line not in bar:
bar.append(line)
latitude = 0
longitude = 0
for line in bar:
longitude += float(line.split(',')[0])
latitude += float(line.split(',')[1])
latitude = latitude / 4
longitude = longitude / 4
return latitude, longitude
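### Hypothetical example: for a rectangular KML bus footprint,
### latlonify('-96.8,32.9,0 -96.9,32.9,0 -96.9,33.0,0 -96.8,33.0,0 -96.8,32.9,0')
### drops the repeated closing vertex, averages the four corners, and
### returns (32.95, -96.85).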
### Load and parse the input kml file
if (filepath_input==None) or (os.path.exists(filepath_input)==False):
print("Missing input file. To get this file, register as an IMRE at "
"http://www.ercot.com/services/rq/imre. Go to "
"https://mis.ercot.com/pps/tibco/mis/Pages/Grid+Information/Long+Term+Planning, "
"then select 'CRR Network Model (Monthly)'. "
"Download and unzip one of the available files and use the filepath "
"as the input value for filepath_input. "
"Example: revmpath+'ERCOT/in/rpt.00011205.0000000000000000.20170530"
".140432154.JUL2017MonthlyCRRNetworkModel/"
"2017.JUL.Monthly.Auction.OneLineDiagram.kml'")
raise Exception("Need path to CRRNetworkModel OneLineDiagram file")
tree = ET.parse(filepath_input)
root = tree.getroot()
buses = root[0][6]
### Extract the node names and coordinates
names, coordinates, latitudes, longitudes = [], [], [], []
for node in buses.iter(tag='{http://www.opengis.net/kml/2.2}name'):
if node.text.find(' ') == -1 and node.text != 'Buses':
names.append(node.text)
for node in buses.iter(
tag='{http://www.opengis.net/kml/2.2}coordinates'):
coordinates.append(node.text)
for node in coordinates:
latitudes.append(latlonify(node)[0])
longitudes.append(latlonify(node)[1])
### Extract the areas and zones (optional)
descriptions = []
for node in buses.iter(
tag='{http://www.opengis.net/kml/2.2}description'):
descriptions.append(node.text)
areas = []
for i in range(len(descriptions)):
foo = descriptions[i]
bar = foo[(foo.find('Area')+14):]
out = bar[:bar.find('</td>')]
areas.append(out)
zones = []
for i in range(len(descriptions)):
foo = descriptions[i]
bar = foo[(foo.find('Zone')+14):]
out = bar[:bar.find('</td>')]
zones.append(out)
settlementzones = []
for i in range(len(descriptions)):
foo = descriptions[i]
bar = foo[(foo.find('Settlement Zone')+25):]
out = bar[:bar.find('</td>')]
settlementzones.append(out)
### Make the output dataframe
dfout = pd.DataFrame({
'node': names, 'latitude': latitudes, 'longitude': longitudes,
'area': areas, 'zone': zones, 'settlementzone': settlementzones
})[['node', 'latitude', 'longitude', 'area', 'zone', 'settlementzone']]
### Normalize node names
dfout['node'] = dfout.node.map(lambda x: str(x).strip().upper())
### Identify duplicate (lat,lon) tuples for NSRDB
latlons = dfout[['latitude', 'longitude']].drop_duplicates().copy()
latlons['latlonindex'] = range(len(latlons))
dfout = dfout.merge(latlons, on=['latitude', 'longitude'], how='left')
### Write outputs
dfout.to_csv(revmpath+'ERCOT/io/ercot-node-latlon.csv', index=False)
latlons[['latlonindex', 'latitude', 'longitude']].to_csv(
revmpath+'ERCOT/io/ercot-node-latlon-unique.csv', index=False)
return dfout
def nodelocations_nyiso():
###### Download input data
### Identify urls
years = range(2005, 2018)
### Old version
# urlbase = ('http://www.nyiso.com/public/webdocs/markets_operations/'
# 'services/planning/Documents_and_Resources/'
# 'Planning_Data_and_Reference_Docs/Data_and_Reference_Docs/')
# urls = {year: urlbase + '{}_NYCA_Generators.xls'.format(year)
# for year in years}
# urls[2012] = urlbase + '2012_NYCA_Generating_Facilities.xls'
# urls[2015] = urlbase + '2015_NYCA_Generators_Revised.xls'
base = 'https://www.nyiso.com/documents/20142/1402024/'
urls = {
2005: base+'2005_NYCA_Generators.xls/64f2ffcf-7859-714f-dc9c-cca2519f453a',
2006: base+'2006_NYCA_Generators.xls/bb67c807-9a27-7039-f3ef-8793d3e72cce',
2007: base+'2007_NYCA_Generators.xls/2e0da2c4-be90-caa3-b201-f211a3f9389b',
2008: base+'2008_NYCA_Generators.xls/cb944d0f-84c5-0f46-1ac3-424e3cbac850',
2009: base+'2009_NYCA_Generators.xls/962f951d-03a0-ccff-1296-5bfedadfeee9',
2010: base+'2010_NYCA_Generators.xls/ede624bb-40f6-6bd6-6fae-664a819b9058',
2011: base+'2011_NYCA_Generators.xls/432a163b-8860-99f0-2f61-8a54d2a7c74d',
2012: base+'2012_NYCA_Generating_Facilities.xls/1bb796f7-7221-2787-d164-9fc669c2ef52',
2013: base+'2013_NYCA_Generators.xls/58f988d4-d72c-510c-ae2f-2afa9b5dc0b3',
2014: base+'2014_NYCA_Generators.xls/92af4de1-ffc4-69cb-bab6-bac4afcec0ca',
2015: base+'2015_NYCA_Generators_Revised.xls/b1dfb906-56d6-b245-1c21-9649038050fd',
2016: base+'2016_NYCA_Generators.xls/b38728a0-0a95-d4e8-3b7b-fe14b4419e89',
2017: base+'2017_NYCA_Generators.xls/42b3e346-b89c-4284-3457-30bd73e3ea19',
}
### Download files
for year in years:
filepathout = revmpath + 'NYISO/in/' + os.path.basename(urls[year])
urllib.request.urlretrieve(urls[year], filepathout)
### Concat input files into clean csv
filesin = {year: revmpath + 'NYISO/in/' + os.path.basename(urls[year])
for year in years}
### Set columns
columns = {}
columns[2005] = [
'line_ref_number', 'owner_operator_billing_org', 'station_unit',
'zone', 'ptid', 'town', 'county', 'state',
'date_in_service',
'capability_MW_sum', 'capability_MW_win',
'dual_cogen', 'unit_type', 'FT', 'CS',
'fuel_type_1', 'fuel_type_2', 'fuel_type_3',
'net_energy_MWh_prev_year', 'notes',
]
columns[2006] = columns[2005]
### 2007, 2008, 2009
columns[2007] = [
'line_ref_number', 'owner_operator_billing_org', 'station_unit',
'zone', 'ptid', 'town', 'county', 'state',
'date_in_service', 'nameplate_rating_kW',
'capability_kW_sum', 'capability_kW_win',
'dual_cogen', 'unit_type', 'FT', 'CS',
'fuel_type_1', 'fuel_type_2', 'fuel_type_3',
'net_energy_MWh_prev_year', 'notes',
]
columns[2008], columns[2009] = columns[2007], columns[2007]
### 2010, 2011, 2012,
columns[2010] = [
'line_ref_number', 'owner_operator_billing_org', 'station_unit',
'zone', 'ptid', 'town', 'county', 'state',
'date_in_service', 'nameplate_rating_MW', 'CRIS_sum_cap_MW',
'capability_MW_sum', 'capability_MW_win',
'dual_cogen', 'unit_type', 'FT', 'CS',
'fuel_type_1', 'fuel_type_2', 'fuel_type_3',
'net_energy_GWh_prev_year', 'notes',
]
columns[2011], columns[2012] = columns[2010], columns[2010]
### 2013, 2014, 2015, 2016, 2017,
columns[2013] = [
'line_ref_number', 'owner_operator_billing_org', 'station_unit',
'zone', 'ptid', 'town', 'county', 'state',
'date_in_service', 'nameplate_rating_MW', 'CRIS_sum_cap_MW',
'capability_MW_sum', 'capability_MW_win',
'dual_cogen', 'unit_type',
'fuel_type_1', 'fuel_type_2', 'fuel_type_3',
'net_energy_GWh_prev_year', 'notes',
]
columns[2014], columns[2015], columns[2016], columns[2017] = (
columns[2013], columns[2013], columns[2013], columns[2013])
### 2018
# columns[2018] = [
# 'line_ref_number', 'owner_operator_billing_org', 'station_unit',
# 'zone', 'ptid', 'town', 'county', 'state',
# 'date_in_service', 'nameplate_rating_MW',
# 'CRIS_sum_cap_MW', 'CRIS_win_cap_MW',
# 'capability_MW_sum', 'capability_MW_win',
# 'dual_cogen', 'unit_type',
# 'fuel_type_1', 'fuel_type_2',
# 'net_energy_GWh_prev_year', 'notes',
# ]
### Set other spreadsheet loading parameters
skiprows = {
2005: 6, 2006: 7, 2007: 7, 2008: 7, 2009: 7, 2010: 6, 2011: 6,
2012: 6, 2013: 6, 2014: 6, 2015: 6, 2016: 6, 2017: 6,
}
skip_footer = {
2005: 2, 2006: 2, 2007: 2, 2008: 2, 2009: 1, 2010: 1, 2011: 2,
2012: 2, 2013: 2, 2014: 3, 2015: 3, 2016: 3, 2017: 1,
}
sheet_name = {year: 0 for year in years}
sheet_name[2016] = 'NYCA_2016'
sheet_name[2017] = 'NYCA_2017'
### Load and concat all dataframes
dfs = {}
for year in years:
df = pd.read_excel(
filesin[year], skiprows=skiprows[year], sheet_name=sheet_name[year],
names=columns[year], usecols=len(columns[year])-1, skip_footer=skip_footer[year])
dfs[year] = df[['ptid', 'town', 'county', 'state']]
dfgens = pd.concat(dfs, axis=0)
dfgens = (dfgens.reset_index(level=0).rename(columns={'level_0':'year'})
.reset_index(drop=True).copy())
### Define location codes
statecodes = {36: 'NY', 42: 'PA', 25: 'MA', 34: 'NJ'}
codestates = {'NY': 36, 'PA': 42, 'MA': 25, 'NJ': 34}
dfs = {}
### NY
df = pd.read_excel(filesin[2017], sheet_name='Gen_Codes', skiprows=29,
usecols=[2, 3, 4, 5]).dropna()
dfs['NY'] = pd.DataFrame(df.values.reshape(len(df)*2,2))
### PA
df = pd.read_excel(filesin[2017], sheet_name='Gen_Codes', skiprows=29,
usecols=[7, 8, 9, 10]).dropna()
dfs['PA'] = pd.DataFrame(df.values.reshape(len(df)*2,2))
### MA
df = pd.read_excel(filesin[2017], sheet_name='Gen_Codes', skiprows=29,
usecols=[13, 14]).dropna()
dfs['MA'] = pd.DataFrame(df.values.reshape(len(df),2))
### NJ
df = pd.read_excel(filesin[2017], sheet_name='Gen_Codes', skiprows=29,
usecols=[18, 19]).dropna()
dfs['NJ'] = pd.DataFrame(df.values.reshape(len(df),2))
codes = pd.concat(dfs).reset_index(level=0).rename(
columns={'level_0':'state', 0: 'countycode', 1: 'county'})
codes['statecode'] = codes['state'].map(lambda x: codestates[x])
codes['countycode'] = codes['countycode'].map(lambda x: x[:3])
codes['code_state'] = codes['countycode'].astype(str) + '_' + codes['state']
codes['county_state'] = codes['county'] + ' county, ' + codes['state']
# codes = codes[['state', 'county', 'county_state', 'code_state']].copy()
# codes.to_csv(revmpath + 'NYISO/test/io/nyiso-county-codes.csv')
###### Determine unique list of locations
### Generate nodes dataframe
dfnodes = dfgens.drop('year',axis=1).drop_duplicates().dropna(subset=['ptid'])
### Turn county codes into counties
countycodes = dict(zip(codes.code_state, codes.county_state))
def foo(x):
if type(x) == int:
return '{:03}'.format(x)
elif x == ' -':
return 'nan'
elif type(x) == str:
return x
else:
return 'nan'
dfnodes['county'] = dfnodes.county.map(foo)
### Clean up dfnodes
dfnodes = dfnodes.drop(dfnodes.loc[dfnodes.county == 'nan'].index)
dfnodes['county, state'] = dfnodes.apply(
lambda row: countycodes['{}_{}'.format(row['county'], statecodes[row['state']])], axis=1)
### Correct some typos
replace = {
'Gilboa NY': 'Gilboa',
'Kittanning PA': 'Kittanning',
'Linden NJ': 'Linden',
'LyonsFalls': 'Lyons Falls',
        'Mahwah NJ': 'Mahwah',
'<NAME>': '<NAME>erson',
'SouthHampton': 'Southampton',
'South Hampton': 'Southampton',
'Wappingers Falls': 'Wappingers',
'': 'nan',
}
dfnodes['town'] = dfnodes.town.map(lambda x: str(x).strip())
dfnodes['town'].replace(replace, inplace=True)
### For each row:
### * If a town is listed, use the town.
### * If a town is not listed, use the county.
def foo(row):
if row['town'] == 'nan':
return row['county, state']
else:
return '{}, {}'.format(row['town'], statecodes[row['state']])
dfnodes['location'] = dfnodes.apply(foo, axis=1)
###### Look up locations
### Set up googlemaps
gmaps = googlemaps.Client(key=apikeys['googlemaps'])
### Get centers from googlemaps
numattempts = 200
sleeptime = 60
locations = dfnodes[['location']].drop_duplicates().reset_index(drop=True)
out=[]
for i, location in enumerate(tqdm(locations.values)):
attempts = 0
while attempts < numattempts:
try:
                geocoded = gmaps.geocode(location[0])
                lat = geocoded[0]['geometry']['location']['lat']
                lon = geocoded[0]['geometry']['location']['lng']
out.append([lat, lon])
time.sleep(0.02)
break
except HTTPError as err:
print('Rebuffed for {} on attempt # {} by "{}".'
'Will retry in {} seconds.'.format(
location, attempts, err, sleeptime))
attempts += 1
time.sleep(sleeptime)
if attempts >= numattempts:
raise Exception('Failed on {} after {} attempts'.format(
location, attempts))
dfout = pd.DataFrame(out, columns=['latitude', 'longitude'])
geo = pd.concat([locations, dfout], axis=1)
###### Average all locations for each node
### Add lat,lon info to nodes df
dfnodes = dfnodes.merge(geo, on='location', how='left')
dfnodes.drop_duplicates(inplace=True)
dfnodes.drop(dfnodes.loc[dfnodes.ptid.map(lambda x: type(x) != int)].index,
inplace=True)
dfnodes['x'] = np.nan
dfnodes['y'] = np.nan
dfnodes['z'] = np.nan
ptids = list(dfnodes['ptid'].sort_values().unique())
### Put lat, lon in cartesian coordinates (assuming spherical Earth)
dfnodes['x'] = dfnodes.apply(rowlatlon2x, axis=1)
dfnodes['y'] = dfnodes.apply(rowlatlon2y, axis=1)
dfnodes['z'] = dfnodes.apply(rowlatlon2z, axis=1)
### Determine centroid of locations listed for each node
lats, lons = [], []
for i, pnode in enumerate(ptids):
x = dfnodes[dfnodes['ptid'] == pnode]['x'].mean()
y = dfnodes[dfnodes['ptid'] == pnode]['y'].mean()
z = dfnodes[dfnodes['ptid'] == pnode]['z'].mean()
outlon = math.atan2(y, x) * 180 / math.pi
rho = math.sqrt(x*x + y*y)
outlat = math.atan2(z, rho) * 180 / math.pi
lats.append(outlat)
lons.append(outlon)
### Make output dataframe
dfout = pd.DataFrame(
{'node':ptids, 'latitude': lats, 'longitude': lons}
)[['node', 'latitude', 'longitude']]
### Identify duplicate (lat,lon) tuples for NSRDB
latlons = dfout[['latitude', 'longitude']].drop_duplicates().copy()
latlons['latlonindex'] = range(len(latlons))
dfout = dfout.merge(latlons, on=['latitude', 'longitude'], how='left')
### Write outputs
geo.to_csv(revmpath+'NYISO/io/nyiso-locations-latlon.csv', index=False)
dfout.to_csv(revmpath+'NYISO/io/nyiso-node-latlon.csv', index=False)
latlons[['latlonindex', 'latitude', 'longitude']].to_csv(
revmpath+'NYISO/io/nyiso-node-unique-latlons-for-nsrdb.csv', index=False)
return dfout
def nodelocations(iso, filein=None):
"""
"""
if iso.upper() == 'CAISO':
nodelocations_caiso()
elif iso.upper() == 'ERCOT':
nodelocations_ercot(filein=filein)
elif iso.upper() == 'MISO':
nodelocations_miso()
elif iso.upper() == 'PJM':
nodelocations_pjm()
elif iso.upper() == 'NYISO':
nodelocations_nyiso()
elif iso.upper() == 'ISONE':
nodelocations_isone()
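### Illustrative usage of the dispatcher above (a sketch; the ISO subset is arbitrary).
### Each call writes the {iso}-node-latlon.csv files consumed by the NSRDB download
### functions defined below.
def _example_rebuild_node_locations(isos=('ERCOT', 'NYISO')):
    for iso in isos:
        nodelocations(iso)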
###########################
### DOWNLOAD NSRDB DATA ###
def lonlat2wkt(lon, lat):
return 'POINT({:+f}{:+f})'.format(lon, lat)
def lonlats2wkt(lonlats):
out = ['{}%20{}'.format(lonlat[0], lonlat[1]) for lonlat in lonlats]
return 'MULTIPOINT({})'.format('%2C'.join(out))
def querify(**kwargs):
out = ['{}={}'.format(key, kwargs[key]) for key in kwargs]
return '&'.join(out)
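### Illustrative sketch (not called anywhere): how the helpers above compose into the
### query fragments used below. The coordinates are made up. Note that the forced '+'
### sign on a positive latitude in lonlat2wkt doubles as a URL-encoded space.
def _example_nsrdb_query_fragment():
    point = lonlat2wkt(-104.99, 39.74)   # 'POINT(-104.990000+39.740000)'
    multi = lonlats2wkt([(-104.99, 39.74), (-105.27, 40.01)])
    return querify(wkt=point, names='2014', interval='30'), multi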
def convertattributes_2to3(attributes):
attributes_2to3 = {
'surface_air_temperature_nwp': 'air_temperature',
'surface_pressure_background': 'surface_pressure',
'surface_relative_humidity_nwp': 'relative_humidity',
'total_precipitable_water_nwp': 'total_precipitable_water',
'wind_direction_10m_nwp': 'wind_direction',
'wind_speed_10m_nwp': 'wind_speed',
}
attributes_in = attributes.split(',')
attributes_out = [attributes_2to3.get(attribute, attribute)
for attribute in attributes_in]
return ','.join(attributes_out)
def convertattributes_3to2(attributes):
attributes_3to2 = {
'air_temperature': 'surface_air_temperature_nwp',
'surface_pressure': 'surface_pressure_background',
'relative_humidity': 'surface_relative_humidity_nwp',
'total_precipitable_water': 'total_precipitable_water_nwp',
'wind_direction': 'wind_direction_10m_nwp',
'wind_speed': 'wind_speed_10m_nwp',
}
attributes_in = attributes.split(',')
attributes_out = [attributes_3to2.get(attribute, attribute)
for attribute in attributes_in]
return ','.join(attributes_out)
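### Illustrative round trip of the two converters above (a sketch; names absent from the
### mapping, e.g. 'ghi', pass through unchanged).
def _example_attribute_roundtrip():
    v2 = convertattributes_3to2('ghi,wind_speed')   # 'ghi,wind_speed_10m_nwp'
    v3 = convertattributes_2to3(v2)                 # 'ghi,wind_speed'
    return v2, v3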
def postNSRDBsize(
years,
lonlats,
attributes='ghi,dni,dhi,solar_zenith_angle,air_temperature,wind_speed',
leap_day='true',
interval='30',
norm=False):
"""
Determine size of NSRDB POST request
"""
numyears = len(years)
numattributes = len(attributes.split(','))
numintervals = sum([pvvm.toolbox.yearhours(year) * 60 / int(interval)
for year in years])
numsites = len(lonlats)
if norm:
return numsites * numattributes * numyears * numintervals / 175000000
return numsites * numattributes * numyears * numintervals
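### Illustrative sketch: check the normalized request size before POSTing. With norm=True
### the function returns the size as a fraction of the 175000000-record normalization
### constant used above; the coordinates here are made up.
def _example_check_post_size():
    lonlats = [(-104.99, 39.74), (-105.27, 40.01)]
    frac = postNSRDBsize(years=[2014, 2015], lonlats=lonlats, norm=True)
    return frac <= 1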
def postNSRDBfiles(
years, lonlats, psmversion=3,
api_key=apikeys['nsrdb'],
attributes='ghi,dni,dhi,solar_zenith_angle,air_temperature,wind_speed',
leap_day='true', interval='30', utc='false'):
"""
"""
### Set url based on version of PSM
if psmversion in [2, '2', 2.]:
url = 'http://developer.nrel.gov/api/solar/nsrdb_0512_download.json?api_key={}'.format(
api_key)
attributes = convertattributes_3to2(attributes)
elif psmversion in [3, '3', 3.]:
url = 'http://developer.nrel.gov/api/solar/nsrdb_psm3_download.json?api_key={}'.format(
api_key)
attributes = convertattributes_2to3(attributes)
else:
raise Exception("Invalid psmversion; must be 2 or 3")
names = ','.join([str(year) for year in years])
    wkt = lonlats2wkt(lonlats)
payload = querify(
wkt=wkt, attributes=attributes,
names=names, utc=utc, leap_day=leap_day, interval=interval,
full_name=nsrdbparams['full_name'], email=nsrdbparams['email'],
affiliation=nsrdbparams['affiliation'], reason=nsrdbparams['reason'],
mailing_list=nsrdbparams['mailing_list']
)
headers = {
'content-type': "application/x-www-form-urlencoded",
'cache-control': "no-cache"
}
response = requests.request("POST", url, data=payload, headers=headers)
output = response.text
print(output[output.find("errors"):output.find("inputs")],
'\n', output[output.find("outputs"):])
def downloadNSRDBfile(
lat, lon, year, filepath=None,
nodename='default', filetype='.gz',
attributes='ghi,dni,dhi,solar_zenith_angle,air_temperature,wind_speed',
leap_day='true', interval='30', utc='false', psmversion=3,
write=True, return_savename=False, urlonly=False):
'''
Downloads file from NSRDB.
NOTE: PSM v2 doesn't include 'surface_albedo' attribute.
NOTE: For PSM v3, can use either v2 or v3 version of attribute labels.
Full list of attributes for PSM v2:
attributes=(
'dhi,dni,ghi,clearsky_dhi,clearsky_dni,clearsky_ghi,cloud_type,' +
'dew_point,surface_air_temperature_nwp,surface_pressure_background,' +
'surface_relative_humidity_nwp,solar_zenith_angle,' +
'total_precipitable_water_nwp,wind_direction_10m_nwp,' +
'wind_speed_10m_nwp,fill_flag')
Full list of attributes for PSM v3:
attributes=(
'dhi,dni,ghi,clearsky_dhi,clearsky_dni,clearsky_ghi,cloud_type,' +
'dew_point,air_temperature,surface_pressure,' +
'relative_humidity,solar_zenith_angle,' +
'total_precipitable_water,wind_direction,' +
'wind_speed,fill_flag,surface_albedo')
Parameters
----------
filename: string
nodename: string
lat: numeric
lon: numeric
year: numeric
Returns
-------
if write == True: # default
'.csv' file if filetype == '.csv', or '.gz' file if filetype == '.gz'
if return_savename == False: pandas.DataFrame # default
if return_savename == True: (pandas.DataFrame, savename) # type(savename) = str
'''
### Check inputs
if filetype not in ['.csv', '.gz']:
raise Exception("filetype must be '.csv' or '.gz'.")
if write not in [True, False]:
raise Exception('write must be True or False.')
if return_savename not in [True, False]:
raise Exception('return_savename must be True or False.')
### Set psmversion to 3 if year is 2016 (since v2 doesn't have 2016)
if year in [2016, '2016', 2016.]:
psmversion = 3
### Remove solar_zenith_angle if year == 'tmy'
if year == 'tmy':
attributes = attributes.replace('solar_zenith_angle,','')
attributes = attributes.replace('solar_zenith_angle','')
year = str(year)
### Set url based on version of PSM
if psmversion in [2, '2', 2.]:
urlbase = 'http://developer.nrel.gov/api/solar/nsrdb_0512_download.csv?'
attributes = convertattributes_3to2(attributes)
elif psmversion in [3, '3', 3.]:
urlbase = 'https://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?'
attributes = convertattributes_2to3(attributes)
else:
raise Exception("Invalid psmversion; must be 2 or 3")
url = (
urlbase + querify(
api_key=apikeys['nsrdb'], full_name=nsrdbparams['full_name'],
email=nsrdbparams['email'], affiliation=nsrdbparams['affiliation'],
reason=nsrdbparams['reason'], mailing_list=nsrdbparams['mailing_list'],
wkt=lonlat2wkt(lon, lat), names=year, attributes=attributes,
leap_day=leap_day, utc=utc, interval=interval))
if urlonly:
return url
try:
df = pd.read_csv(url)
    except HTTPError as err:
        print(url)
        print(err)
        raise
df = df.fillna('')
columns = df.columns
if write == True:
if len(filepath) != 0 and filepath[-1] != '/':
filepath = filepath + '/'
if nodename in [None, 'default']:
savename = (filepath + df.loc[0,'Location ID'] + '_' +
df.loc[0,'Latitude'] + '_' +
df.loc[0,'Longitude'] + '_' + year + filetype)
else:
# savename = str(filepath + nodename + '-' + year + filetype)
savename = os.path.join(
filepath, '{}-{}{}'.format(nodename, year, filetype))
### Determine number of columns to write (used to always write 11)
numcols = max(len(attributes.split(','))+5, 11)
### Write the output
if filetype == '.gz':
df.to_csv(savename, columns=columns[0:numcols], index=False,
compression='gzip')
elif filetype == '.csv':
df.to_csv(savename, columns=columns[0:numcols], index=False)
if return_savename == True:
return df, savename
else:
return df
return df
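### Illustrative usage sketch for downloadNSRDBfile(); the coordinates, node name, and
### output directory are made up, and a valid NSRDB API key is assumed in apikeys['nsrdb'].
def _example_download_single_site():
    df = downloadNSRDBfile(
        lat=39.74, lon=-104.99, year=2014,
        filepath='NSRDB/2014/30min/', nodename='EXAMPLENODE',
        interval='30', psmversion=3)
    return df.head()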
def downloadNSRDBfiles(
dfin, years, nsrdbpath, namecolumn=None,
resolution=None, latlonlabels=None,
filetype='.gz', psmversion=3,
attributes='ghi,dni,dhi,solar_zenith_angle,air_temperature,wind_speed',
wait=0.5, maxattempts=200,):
"""
"""
###### Set defaults
### Convert attributes if necessary
if psmversion in [2, '2', 2.]:
attributes = convertattributes_3to2(attributes)
elif psmversion in [3, '3', 3.]:
attributes = convertattributes_2to3(attributes)
### Get lat, lon labels
if ('latitude' in dfin.columns) and ('longitude' in dfin.columns):
latlabel, lonlabel = 'latitude', 'longitude'
elif ('Latitude' in dfin.columns) and ('Longitude' in dfin.columns):
latlabel, lonlabel = 'Latitude', 'Longitude'
elif ('lat' in dfin.columns) and ('lon' in dfin.columns):
latlabel, lonlabel = 'lat', 'lon'
elif ('lat' in dfin.columns) and ('long' in dfin.columns):
latlabel, lonlabel = 'lat', 'long'
elif ('x' in dfin.columns) and ('y' in dfin.columns):
latlabel, lonlabel = 'x', 'y'
else:
latlabel, lonlabel = latlonlabels[0], latlonlabels[1]
### Loop over years
for year in years:
### Set defaults
if (resolution == None) and (year == 'tmy'):
resolution = 60
elif (resolution == None) and (type(year) == int):
resolution = 30
### Set up output folder
outpath = nsrdbpath+'{}/{}min/'.format(year, resolution)
os.makedirs(outpath, exist_ok=True)
### Make list of files downloaded so far
downloaded = glob(outpath + '*') ## or os.listdir(outpath)
downloaded = [os.path.basename(file) for file in downloaded]
### Make list of files to download
if 'latlonindex' in dfin.columns:
dfin.drop_duplicates('latlonindex', inplace=True)
dfin['name'] = dfin['latlonindex'].copy()
dfin['file'] = dfin['latlonindex'].map(
lambda x: '{}{}-{}{}'.format(outpath, x, year, filetype))
elif namecolumn is not None:
dfin['name'] = dfin[namecolumn].copy()
dfin['file'] = dfin[namecolumn].map(
lambda x: '{}{}-{}{}'.format(outpath, x, year, filetype))
elif namecolumn is None:
dfin['name'] = None
dfin['file'] = None
dfin['todownload'] = dfin['file'].map(
lambda x: os.path.basename(x) not in downloaded)
dftodownload = dfin[dfin['todownload']].reset_index(drop=True)
print('{}: {} done, {} to download'.format(
year, len(downloaded), len(dftodownload)))
### Loop over locations
for i in trange(len(dftodownload)):
attempts = 0
while attempts < maxattempts:
try:
downloadNSRDBfile(
lat=dftodownload[latlabel][i],
lon=dftodownload[lonlabel][i],
year=year,
filepath=outpath,
nodename=dftodownload['name'][i],
interval=str(resolution),
psmversion=psmversion,
attributes=attributes)
break
except HTTPError as err:
if str(err) in ['HTTP Error 504: Gateway Time-out',
'HTTP Error 500: Internal Server Error']:
print(('Rebuffed on attempt # {} at {} by "{}". '
'Retry in 5 minutes.').format(
attempts, pvvm.toolbox.nowtime(), err))
attempts += 1
time.sleep(5 * 60)
else:
print(('Rebuffed on attempt # {} at {} by "{}". '
'Retry in {} hours.').format(
attempts, pvvm.toolbox.nowtime(), err, wait))
attempts += 1
time.sleep(wait * 60 * 60)
if attempts >= maxattempts:
print("Something must be wrong. No response after {} attempts.".format(
attempts))
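### Illustrative usage sketch for downloadNSRDBfiles(): dfin needs latitude/longitude
### columns plus (optionally) a name column. The nodes, coordinates, and path are made up.
def _example_download_many_sites():
    dfin = pd.DataFrame({
        'node': ['NODE_A', 'NODE_B'],
        'latitude': [39.74, 40.01],
        'longitude': [-104.99, -105.27]})
    downloadNSRDBfiles(dfin, years=[2014], nsrdbpath='NSRDB/', namecolumn='node')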
def downloadNSRDBfiles_iso(year, resolution=None,
isos=['CAISO', 'ERCOT', 'MISO', 'NYISO', 'PJM', 'ISONE'],
filetype='.gz', wait=0.5, psmversion=3,
attributes='ghi,dni,dhi,solar_zenith_angle,air_temperature,wind_speed'):
"""
"""
# nodemap = {
# 'CAISO': os.path.join(revmpath, 'CAISO/io/caiso-node-latlon.csv'),
# 'ERCOT': os.path.join(revmpath, 'ERCOT/io/ercot-node-latlon.csv'),
# 'MISO': os.path.join(revmpath, 'MISO/in/miso-node-map.csv'),
# 'PJM': os.path.join(revmpath, 'PJM/io/pjm-pnode-latlon-uniquepoints.csv'),
# 'NYISO': os.path.join(revmpath, 'NYISO/io/nyiso-node-latlon.csv'),
# 'ISONE': os.path.join(revmpath, 'ISONE/io/isone-node-latlon.csv')
# }[iso]
### Set defaults
if (resolution == None) and (year == 'tmy'):
resolution = 60
elif (resolution == None) and (type(year) == int):
resolution = 30
### Convert attributes if necessary
if psmversion in [2, '2', 2.]:
attributes = convertattributes_3to2(attributes)
elif psmversion in [3, '3', 3.]:
attributes = convertattributes_2to3(attributes)
for iso in isos:
nodemap = revmpath + '{}/io/{}-node-latlon.csv'.format(
iso.upper(), iso.lower())
### Load node key
dfin = pd.read_csv(nodemap)
dfin.rename(
columns={'name': 'node', 'pnodename': 'node', 'ptid': 'node'},
inplace=True)
### Set up output folder
outpath = os.path.join(revmpath, '{}/in/NSRDB/{}/{}min/'.format(
iso, year, resolution))
if not os.path.isdir(outpath):
os.makedirs(outpath)
### Make list of files downloaded so far
downloaded = glob(outpath + '*') ## or os.listdir(outpath)
downloaded = [os.path.basename(file) for file in downloaded]
### Make list of files to download
if 'latlonindex' in dfin.columns:
dfin.drop_duplicates('latlonindex', inplace=True)
dfin['name'] = dfin['latlonindex'].copy()
dfin['file'] = dfin['latlonindex'].map(
lambda x: '{}{}-{}{}'.format(outpath, x, year, filetype))
else:
dfin['name'] = dfin['node'].copy()
dfin['file'] = dfin['node'].map(
lambda x: '{}{}-{}{}'.format(outpath, x, year, filetype))
dfin['todownload'] = dfin['file'].map(
lambda x: os.path.basename(x) not in downloaded)
dftodownload = dfin[dfin['todownload']].reset_index(drop=True)
print('{} {}: {} done, {} to download'.format(
iso.upper(), year, len(downloaded), len(dftodownload)))
for i in trange(len(dftodownload)):
attempts = 0
while attempts < 200:
try:
downloadNSRDBfile(
lat=dftodownload['latitude'][i],
lon=dftodownload['longitude'][i],
year=year,
filepath=outpath,
nodename=str(dftodownload['name'][i]),
interval=str(resolution),
psmversion=psmversion,
attributes=attributes)
break
except HTTPError as err:
if str(err) in ['HTTP Error 504: Gateway Time-out',
'HTTP Error 500: Internal Server Error']:
print(('Rebuffed on attempt # {} at {} by "{}". '
'Retry in 5 minutes.').format(
attempts, pvvm.toolbox.nowtime(), err))
attempts += 1
time.sleep(5 * 60)
else:
print(('Rebuffed on attempt # {} at {} by "{}". '
'Retry in {} hours.').format(
attempts, pvvm.toolbox.nowtime(), err, wait))
attempts += 1
time.sleep(wait * 60 * 60)
if attempts >= 200:
print("Something must be wrong. No response after {} attempts.".format(
attempts))
def postNSRDBfiles_iso(year, yearkey, resolution=None,
isos=['CAISO', 'ERCOT', 'MISO', 'NYISO', 'PJM', 'ISONE'],
filetype='.gz', wait=3, psmversion=2, chunksize=1000,
attributes='ghi,dni,dhi,solar_zenith_angle,air_temperature,wind_speed'):
"""
Notes
-----
* This function can only be run after all NSRDB files for a given year have been
downloaded using downloadNSRDBfiles(), as the POST request scrambles the
node-to-NSRDBid correspondence.
* The files will be sent to settings.nsrdbparams['email']. Need to download and unzip them.
Default location for unzipped files is revmpath+'USA/in/NSRDB/nodes/{}/'.format(year).
"""
# nodemap = {
# 'CAISO': os.path.join(revmpath, 'CAISO/io/caiso-node-latlon.csv'),
# 'ERCOT': os.path.join(revmpath, 'ERCOT/io/ercot-node-latlon.csv'),
# 'MISO': os.path.join(revmpath, 'MISO/in/miso-node-map.csv'),
# 'PJM': os.path.join(revmpath, 'PJM/io/pjm-pnode-latlon-uniquepoints.csv'),
# 'NYISO': os.path.join(revmpath, 'NYISO/io/nyiso-node-latlon.csv'),
# 'ISONE': os.path.join(revmpath, 'ISONE/io/isone-node-latlon.csv')
# }[iso]
### Set defaults
if (resolution == None) and (year == 'tmy'):
resolution = 60
elif (resolution == None) and (type(year) == int):
resolution = 30
### Convert attributes if necessary
if psmversion in [2, '2', 2.]:
attributes = convertattributes_3to2(attributes)
elif psmversion in [3, '3', 3.]:
attributes = convertattributes_2to3(attributes)
### Make dataframe of nodes from all ISOs
dictnodes = {}
for iso in isos:
### Load node key
nodemap = revmpath + '{}/io/{}-node-latlon.csv'.format(
iso.upper(), iso.lower())
dfin = pd.read_csv(nodemap)
dfin.rename(
columns={'name': 'node', 'pnodename': 'node', 'ptid': 'node'},
inplace=True)
inpath = os.path.join(revmpath, '{}/in/NSRDB/{}/{}min/'.format(
iso, yearkey, resolution))
### Make list of files downloaded so far
downloaded = glob(inpath + '*') ## or os.listdir(inpath)
### Make list of files to download
if 'latlonindex' in dfin.columns:
dfin.drop_duplicates('latlonindex', inplace=True)
dfin['name'] = dfin['latlonindex'].copy()
dfin['file'] = dfin['latlonindex'].map(
lambda x: '{}{}-{}{}'.format(inpath, x, yearkey, filetype))
else:
dfin['name'] = dfin['node'].copy()
dfin['file'] = dfin['node'].map(
lambda x: '{}{}-{}{}'.format(inpath, x, yearkey, filetype))
dfin['todownload'] = dfin['file'].map(
lambda x: x not in downloaded)
dictnodes[iso] = dfin.copy()
dfnodes = pd.concat(dictnodes)
### Identify locations to include in query
nsrdbids, nsrdblats, nsrdblons = [], [], []
for file in tqdm(dfnodes['file'].values):
df = pd.read_csv(file, nrows=1)
nsrdbids.append(df['Location ID'][0])
nsrdblats.append(df['Latitude'][0])
nsrdblons.append(df['Longitude'][0])
dfnodes['NSRDBid'] = nsrdbids
dfnodes['NSRDBlat'] = nsrdblats
dfnodes['NSRDBlon'] = nsrdblons
dfnodes = dfnodes.reset_index(level=0).rename(columns={'level_0': 'iso'})
dfnodes.reset_index(drop=True, inplace=True)
### Save dfnodes for use in unpacking
if not os.path.exists(revmpath+'USA/io/'):
os.makedirs(revmpath+'USA/io/')
dfnodes.to_csv(revmpath+'USA/io/nsrdbnodekey-{}.csv'.format(yearkey), index=False)
    ### Post NSRDB requests in chunks of chunksize sites, dropping duplicate NSRDBids
dftodownload = dfnodes.drop_duplicates('NSRDBid').copy()
lonlatstodownload = list(zip(dftodownload['NSRDBlon'], dftodownload['NSRDBlat']))
for i in range(0,len(lonlatstodownload), chunksize):
print(i)
postNSRDBfiles(years=[year], lonlats=lonlatstodownload[i:i+chunksize],
psmversion=psmversion, attributes=attributes)
time.sleep(wait)
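### Illustrative sketch of the workflow described in the docstring above: POST the
### requests, manually download and unzip the emailed files to the default postpath,
### then unpack them with the function defined below.
# postNSRDBfiles_iso(year=2014, yearkey=2014)
# ...download the emailed zip files to revmpath+'USA/in/NSRDB/nodes/2014/'...
# unpackpostNSRDBfiles_iso(year=2014, yearkey=2014)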
def unpackpostNSRDBfiles_iso(year, yearkey, postpath=None,
isos=['CAISO', 'ERCOT', 'MISO', 'NYISO', 'PJM', 'ISONE'],
resolution=None, filetypeout='.gz',
attributes='ghi,dni,dhi,solar_zenith_angle,air_temperature,wind_speed'):
"""
Notes
-----
* This function can only be run after postNSRDBfiles_iso().
* Default location for unzipped posted files is
revmpath+'USA/in/NSRDB/nodes/{}/'.format(year).
    * Default location for dfnodes is
revmpath+'USA/io/nsrdbnodekey-{}.csv'.format(yearkey)
"""
### Set defaults, if necessary
if postpath==None:
postpath = revmpath+'USA/in/NSRDB/nodes/{}/'.format(year)
if (resolution == None) and (year == 'tmy'):
resolution = 60
elif (resolution == None) and (type(year) == int):
resolution = 30
compression = 'gzip'
if filetypeout not in ['gzip', '.gz']:
compression = None
### Load dfnodes from default location
dfnodes = pd.read_csv(revmpath+'USA/io/nsrdbnodekey-{}.csv'.format(yearkey))
### Get downloaded file list
postfiles = glob(postpath + '*')
### Extract parameters from filename
def fileparams(filepath, filetype='.csv'):
filename = os.path.basename(filepath)
nsrdbid = filename[:filename.find('_')]
lat = filename[filename.find('_')+1:filename.find('_-')]
lon = filename[filename.find('_-')+1:filename.find(filetype)][:-5]
year = filename[-(len(filetype)+4):-len(filetype)]
return nsrdbid, lat, lon, year
    dfpostfiles = pd.DataFrame(postfiles, columns=['filepath'])
# Written By : <NAME>
# Adviser : <NAME>, PhD
# Research : Using a neural network to maximize the significance of ttHH production.
# Description: Script that trains and tests a Keras NN.
# Reference :http://cdsweb.cern.ch/record/2220969/files/ATL-PHYS-PUB-2016-023.pdf
###########################################################################################################################
# Imported packages.
import csv, sys
import uproot
import pandas as pd
import numpy as np
from numpy import array
np.set_printoptions(threshold=sys.maxsize)
import shap
import argparse
import tensorflow as tf
import tkinter
import matplotlib
# matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import math
import time
from math import log, sqrt
from tensorflow import keras
from tensorflow.keras import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
status = len(tf.config.experimental.list_physical_devices("GPU"))
from sklearn.metrics import (
    precision_recall_curve,
    plot_precision_recall_curve,
    average_precision_score,
    roc_curve,
    auc,
    roc_auc_score,
)
from sklearn.metrics import confusion_matrix
from datetime import datetime
import slug # Library with common functions used in multiple scripts.
parser = argparse.ArgumentParser(description="number of jets")
parser.add_argument("--num", type=str, help="Use '--num=' followed by a Number of jets")
args = parser.parse_args()
# numofjets = int(args.num)
numofjets = 10
# Fixed values.
tree = "OutputTree"
seed = 42
phase = 1
branches = slug.dataCol(phase,numofjets)
# Number of features.
numBranches = len(branches) - 2
mikeHancePATH = '/data/users/mhance/tthh/'
jTellecheaPATH = '~/neural_networks/data/flat_btageff_0.77/'
# jTellecheaPATH = '~/neural_networks/data/flat_btageff_0.85/'
# Data read from file.
signal = uproot.open(jTellecheaPATH+"new_TTHH.root")[tree]
df_signal = signal.pandas.df(branches)
bkgTTBB = uproot.open(jTellecheaPATH+"new_TTBB.root")[tree]
df_bkgTTBB = bkgTTBB.pandas.df(branches)
bkgTTH = uproot.open(jTellecheaPATH+"new_TTH.root")[tree]
df_bkgTTH = bkgTTH.pandas.df(branches)
bkgTTZ = uproot.open(jTellecheaPATH+"new_TTZ.root")[tree]
df_bkgTTZ = bkgTTZ.pandas.df(branches)
df_background = pd.concat([df_bkgTTBB, df_bkgTTH, df_bkgTTZ])
import dask.dataframe as dd
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
def test_create_entity_from_dask_df(pd_es):
dask_es = EntitySet(id="dask_es")
log_dask = dd.from_pandas(pd_es["log"].df, npartitions=2)
dask_es = dask_es.entity_from_dataframe(
entity_id="log_dask",
dataframe=log_dask,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(pd_es["log"].df, dask_es["log_dask"].df.compute(), check_like=True)
def test_create_entity_with_non_numeric_index(pd_es, dask_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), dask_es['new_entity'].df.compute())
def test_create_entityset_with_mixed_dataframe_types(pd_es, dask_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
    # Test error is raised when trying to add Dask entity to entityset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(dask_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id")
    # Test error is raised when trying to add pandas entity to entityset with existing dask entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(dask_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
dask_es = EntitySet(id="dask_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
sessions_dask = dd.from_pandas(sessions, npartitions=2)
sessions_vtypes = {
"id": ft.variable_types.Id,
"user": ft.variable_types.Id,
"time": ft.variable_types.DatetimeTimeIndex,
"strings": ft.variable_types.Text
}
transactions = pd.DataFrame({"id": [0, 1, 2, 3, 4, 5],
"session_id": [0, 0, 1, 2, 2, 3],
"amount": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13],
"time": [pd.to_datetime('2019-01-10 03:53'),
                                          pd.to_datetime('2019-01-10 04:12'),
# MIT License
# Copyright (c) [2017] [<NAME>]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
from numpy.linalg import norm
from scipy.sparse import issparse
from .utils import svd_wrapper, centering
class pca(object):
"""
Computes the Principal Components Analysis (PCA) of a data matrix
X (n_samples x n_features).
Parameters
----------
n_components: int, default = None
rank of the decomposition. If None, will compute full PCA.
    center: str, default = 'mean'
        How to center the columns of X. If None, will not center the
        columns (i.e. just computes the SVD).
Attributes
----------
scores_: pd.DataFrame
- scores_ shape: (n_samples, n_components)
The orthonormal matrix of (normalized) scores.
loadings_: pd.DataFrame
- loadings_ shape: (n_features, n_components)
The orthonormal matrix of loadings.
svals_: pd.Series
- svals_ shape: (n_components, )
The singular values.
m_: np.array
- m_ shape: (n_features, )
The vector used to center the data.
frob_norm_: float
The Frobenius norm of the training data matrix X.
shape_: tuple
- shape_ length: 2
The shape of the original data matrix.
"""
def __init__(self, n_components=None, center='mean'):
self.n_components = n_components
self.center = center
def get_params(self):
return {'n_components': self.n_components,
'center': self.center}
def __repr__(self):
if not hasattr(self, 'scores_'):
return 'pca object, nothing has been computed yet'
else:
return 'Rank {} pca of a {} matrix'.format(self.n_components,
self.shape_)
def fit(self, X):
"""
Computes the PCA decomposition of X.
Parameters
----------
X: array-like or sparse matrix
- X.shape = (n_samples, n_features)
"""
self.shape_, obs_names, var_names, self.n_components, \
= _arg_checker(X, self.n_components)
# possibly mean center X
X, self.m_ = centering(X, self.center)
# compute SVD
U, D, V = svd_wrapper(X, self.n_components)
# compute variance explained
if self.n_components == min(X.shape):
self.frob_norm_ = np.sqrt(sum(D ** 2))
else:
self.frob_norm_ = _safe_frob_norm(X)
self.var_expl_prop_ = D ** 2 / self.frob_norm_ ** 2
self.var_expl_cum_ = np.cumsum(self.var_expl_prop_)
        self.scores_, self.svals_, self.loadings_ = \
            svd2pd(U, D, V, obs_names=obs_names, var_names=var_names)
        if self.n_components is None:
            self.n_components = self.scores_.shape[1]
return self
@classmethod
def from_precomputed(cls, n_components=None, center=None,
scores=None, loadings=None, svals=None,
obs_names=None, var_names=None, comp_names=None,
m=None, frob_norm=None, var_expl_prop=None,
shape=None):
"""
Loads the pca object from a precomputed PCA decomposition.
Returns
------
X: pca object
"""
x = cls()
if n_components is None and scores is not None:
n_components = scores.shape[1]
x.n_components = n_components
        if shape is None:
            shape = [None, None]
if scores is not None:
shape[0] = scores.shape[0]
if loadings is not None:
shape[1] = loadings.shape[0]
x.shape_ = shape
if scores is not None and type(scores) != pd.DataFrame:
if obs_names is None:
obs_names = _default_obs_names(scores.shape[0])
if comp_names is None:
comp_names = _default_comp_names(scores.shape[1])
scores = pd.DataFrame(scores, index=obs_names,
columns=comp_names)
if svals is not None and type(svals) != pd.Series:
if comp_names is None:
comp_names = _default_comp_names(loadings.shape[1])
            svals = pd.Series(svals, index=comp_names)
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
msg = "cannot use an invert condition when passing to numexpr"
with pytest.raises(NotImplementedError, match=msg):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
msg = "unable to collapse Joint Filters"
# not implemented
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[df.index[2:7], "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[df.index[2:7], "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_select_as_multiple(setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
msg = "keys must be a list/tuple"
# no tables stored
with pytest.raises(TypeError, match=msg):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(TypeError, match=msg):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(TypeError, match=msg):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple("df1", where=["A>0", "B>0"], selector="df1")
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected, check_freq=False)
# FIXME: 2021-01-20 this is failing with freq None vs 4B on some builds
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
msg = "all tables must have exactly the same nrows!"
with pytest.raises(ValueError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame({"cols": range(6), "values": range(6)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
{"cols": ["13.0", "14.0", "15.0"], "values": [3.0, 4.0, 5.0]},
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_query_with_nested_special_character(setup_path):
df = DataFrame(
{
"a": ["a", "a", "c", "b", "test & test", "c", "b", "e"],
"b": [1, 2, 3, 4, 5, 6, 7, 8],
}
)
expected = df[df.a == "test & test"]
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
result = store.select("test", 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_query_long_float_literal(setup_path):
# GH 14241
df =
|
DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from PIL import Image
import streamlit as st
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn import svm
data = pd.read_csv('PL.csv')
X_all = data.drop(['FTR'], axis=1)
y_all = data['FTR']
# Standardising the data.
from sklearn.preprocessing import scale
#Center to the mean and component wise scale to unit variance.
cols = [['H_ST','H_SOG','H_SFG','H_PT','H_COR','H_FL','H_YC','H_RC','A_ST','A_SOG','A_SFG','A_PT','A_COR','A_FL','A_YC','A_RC']]
for col in cols:
X_all[col] = scale(X_all[col])
X_all = data.drop(['HTAG','HTHG','FTAG','FTHG','HTR','Date','FTR','Country','League','Type','Season','Home_Team','Away_team','ETR','ETHG','ETAG','PENR','PENHG','PENAG'], axis=1)
from sklearn.feature_selection import SelectKBest, chi2
sel = SelectKBest(chi2, k = 5)
sel.fit(X_all, y_all)
X = sel.transform(X_all)
X = pd.DataFrame(X)
from sklearn.model_selection import train_test_split
# Shuffle and split the dataset into training and testing set.
X_train, X_test, y_train, y_test = train_test_split(X, y_all,
test_size = 0.20,
random_state = 3,
stratify = y_all)
SuperVector = svm.SVC()
param_grid = {'C':[0.1,1], 'kernel':['rbf', 'poly', 'sigmoid','linear'], 'degree':[1,2,3]}
SVMGrid=GridSearchCV(estimator=SuperVector, param_grid=param_grid)
SVMGrid.fit(X_train, y_train)
SVMPred = SVMGrid.predict(X_test)
Arsenal = {
'name' : 'Arsenal',
'logo' : Image.open('logo/arsenal_logo2.png'),
'data' : pd.DataFrame([0,1,2,4,4]).transpose()
}
Inter = {
'name' : 'Internazionale',
'logo' : Image.open('logo/inter_logo.png'),
'data' : pd.DataFrame([4,1,2,4,5]).transpose()
}
Milan = {
'name' : 'Milan',
'logo' : Image.open('logo/milan_logo.png'),
'data' : pd.DataFrame([2,1,2,4,5]).transpose()
}
Chelsea = {
'name' : 'Chelsea',
'logo':Image.open('logo/chelsea_logo.png'),
'data': pd.DataFrame([1,1,1,3,0]).transpose()
}
Manchester_United = {
'name' : 'Manchester United',
'logo':Image.open('logo/manchester_united_logo.png'),
'data': pd.DataFrame([0,0,1,1,0]).transpose()
}
Liverpool = {
'name': 'Liverpool',
'logo': Image.open('logo/liverpool_logo.png'),
'data': pd.DataFrame([0,0,1,1,1]).transpose()
}
Manchester_City = {
'name' : 'Manchester City',
'logo':Image.open('logo/man_city_logo.png'),
'data': pd.DataFrame([3,4,2,1,8]).transpose()
}
Juventus = {
'name' : 'Juventus',
'logo':Image.open('logo/juve_logo.png'),
'data': pd.DataFrame([3,4,2,1,2]).transpose()
}
Barcelone = {
'name' : 'Barcelone',
'logo':Image.open('logo/barcelone_logo.png'),
'data': pd.DataFrame([3,4,2,1,8]).transpose()
}
Real_Madrid = {
    'name' : 'Real Madrid',
'logo':Image.open('logo/real_madrid_logo.png'),
'data':
|
pd.DataFrame([3,4,2,1,1])
|
pandas.DataFrame
|
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
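        # weighting every observation by 1/N turns the raw counts into relative frequencies, so the bars sum to 1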
return self.x, self.y
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
            self.periods = DynamicChart.get_periods(self, frame, step_size)  # splits the DataFrame into the specified periods
if len(self.periods) == 0:
return 'error'
            self.cumulative = 0  # cumulative total of defect submissions for the current period
for self.period in self.periods:
                # if the period starts before the earliest defect date, count from that earliest date and label the point with it
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
            # check whether the latest defect date falls after the end of the last full period
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
                # process the days that fall outside the full set of periods
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
            self.periods = DynamicChart.get_periods(self, frame, step_size)  # splits the DataFrame into the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
                    self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '3M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)))]
                    self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '6M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == 'A-DEC':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(str(int(self.periods[0])+1)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if(pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(str(int(self.periods[0])+1))):
self.newFrame = frame[(
|
pandas.to_datetime(frame['Created_tr'])
|
pandas.to_datetime
|
import pandas
import numpy as np
import patsy
import argparse
import statsmodels.discrete.count_model as cm
import seaborn as sb
from scipy.stats import nbinom
#
# TODO -- this should really be part of bin3C or use proxigenomics_toolkit
# and derive all this information itself. The logic for producing the
# contacts is important. An existing issue is that bin3C is Python 2.7
# while I strongly suspect the latest codebase of important modules
# will only be deployed for Python >3
#
def fit_data(df, n_sample=None):
if n_sample is None:
return df
else:
return df.sample(n=n_sample)
def mul_ab(df, index1, index2, col):
return df.loc[index1, col].values * df.loc[index2, col].values
def scaler(a, _mu=None, _sigma=None):
    if _mu is None:
        _mu = a.mean()
    if _sigma is None:
        _sigma = a.std()
return (a - float(_mu)) / float(_sigma), _mu, _sigma
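# Illustrative use of scaler(): it standardises to zero mean / unit variance and returns the
# (mu, sigma) it used so the same transform can be reapplied to other data, e.g.
#   scaled, mu, sigma = scaler(some_series)            # some_series is a placeholder
#   rescaled, _, _ = scaler(other_series, mu, sigma)   # reuse the fitted parameters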
def make_table(_contacts, _sites, _lengths, _coverage):
return pandas.DataFrame({'contacts': _contacts,
'sites': _sites,
'length': _lengths,
'coverage': _coverage})
def convert_params(mu, alpha):
"""
Convert mean/dispersion parameterization of a negative binomial to the ones scipy supports
See https://en.wikipedia.org/wiki/Negative_binomial_distribution#Alternative_formulations
"""
r = 1. / alpha
var = mu + 1. / r * mu ** 2
p = (var - mu) / var
return r, 1. - p
def nbinom_cdf(c, mu, alpha):
"""
Re-parameterised scipy negative binomial
:param c: observed count
:param mu: expected count
:param alpha: dispersion (alpha = 1/r)
:return: cumulative probability
"""
return nbinom.cdf(c, *convert_params(mu, alpha))
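# Illustrative sanity check: convert_params maps the (mu, alpha) parameterisation onto
# scipy's (n, p), so the converted distribution should reproduce mean = mu and
# var = mu + alpha * mu**2 (the demo values below are arbitrary).
_mu_demo, _alpha_demo = 10.0, 0.5
_n_demo, _p_demo = convert_params(_mu_demo, _alpha_demo)
assert abs(nbinom.mean(_n_demo, _p_demo) - _mu_demo) < 1e-6
assert abs(nbinom.var(_n_demo, _p_demo) - (_mu_demo + _alpha_demo * _mu_demo ** 2)) < 1e-6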
parser = argparse.ArgumentParser()
parser.add_argument('--threshold', '-t', type=float, default=0.1,
help='Threshold cut-off for significant contacts')
parser.add_argument('--n-sample', '-n', type=int,
help='Limit number of observations used in model fitting')
parser.add_argument('coverage', help='Coverage file [name,depth]')
parser.add_argument('contig_info', help="Contig information file [name,length,sites]")
parser.add_argument('intra_contacts', help='Intra-genome contacts')
parser.add_argument('raw_contacts', help='All raw contacts')
parser.add_argument('valid_out', help='Output table of valid contacts')
parser.add_argument('spur_out', help='Output table of spurious contacts')
args = parser.parse_args()
print('Reading input data')
coverage = pandas.read_csv(args.coverage, header=None, names=['contig_name', 'coverage'])
contig_info =
|
pandas.read_csv(args.contig_info, header=None, names=['contig_name', 'length', 'sites'])
|
pandas.read_csv
|
"""Main helper"""
__docformat__ = "numpy"
import argparse
from datetime import datetime, timedelta
from typing import List
import matplotlib.pyplot as plt
import mplfinance as mpf
import pandas as pd
import pyEX
import pytz
import yfinance as yf
from alpha_vantage.timeseries import TimeSeries
from numpy.core.fromnumeric import transpose
from tabulate import tabulate
from gamestonk_terminal import config_plot as cfgPlot
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.common.technical_analysis import trendline_api as trend
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
plot_autoscale,
valid_date,
)
# pylint: disable=no-member,too-many-branches,C0302
def clear(
other_args: List[str],
s_ticker: str,
s_start,
s_interval: str,
df_stock: pd.DataFrame,
):
"""Clears loaded stock and returns empty variables
Parameters
----------
other_args : List[str]
Argparse arguments
s_ticker : str
Ticker
s_start : str
Start date
s_interval : str
Interval to get data for
df_stock : pd.DataFrame
Preloaded dataframe
Returns
-------
str
Ticker
str
Start date
str
Interval
pd.DataFrame
Dataframe of data
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="clear",
description="""Clear previously loaded stock ticker.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return "", "", "", pd.DataFrame()
print("Clearing stock ticker to be used for analysis\n")
return "", "", "", pd.DataFrame()
except SystemExit:
print("")
return s_ticker, s_start, s_interval, df_stock
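# Illustrative call: clear() discards the current session state, e.g.
#   s_ticker, s_start, s_interval, df_stock = clear([], s_ticker, s_start, s_interval, df_stock)
# which leaves empty strings and an empty DataFrame on success.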
def load(
other_args: List[str],
s_ticker: str,
s_start,
s_interval: str,
df_stock: pd.DataFrame,
):
"""Load selected ticker
Parameters
----------
other_args : List[str]
Argparse arguments
s_ticker : str
Ticker
s_start : str
Start date
s_interval : str
Interval to get data for
df_stock : pd.DataFrame
Preloaded dataframe
Returns
-------
str
Ticker
str
Start date
str
Interval
pd.DataFrame
Dataframe of data.
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description="Load stock ticker to perform analysis on. When the data source is 'yf', an Indian ticker can be"
" loaded by using '.NS' at the end, e.g. 'SBIN.NS'. See available market in"
" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html.",
)
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
required="-h" not in other_args,
help="Stock ticker",
)
parser.add_argument(
"-s",
"--start",
type=valid_date,
default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
dest="s_start_date",
help="The starting date (format YYYY-MM-DD) of the stock",
)
parser.add_argument(
"-i",
"--interval",
action="store",
dest="n_interval",
type=int,
default=1440,
choices=[1, 5, 15, 30, 60],
help="Intraday stock minutes",
)
parser.add_argument(
"--source",
action="store",
dest="source",
choices=["yf", "av", "iex"],
default="yf",
help="Source of historical data.",
)
parser.add_argument(
"-p",
"--prepost",
action="store_true",
default=False,
dest="b_prepost",
help="Pre/After market hours. Only works for 'yf' source, and intraday data",
)
try:
# For the case where a user uses: 'load BB'
if other_args and "-t" not in other_args and "-h" not in other_args:
other_args.insert(0, "-t")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return [s_ticker, s_start, s_interval, df_stock]
# Daily
if ns_parser.n_interval == 1440:
# Alpha Vantage Source
if ns_parser.source == "av":
ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
# pylint: disable=unbalanced-tuple-unpacking
df_stock_candidate, _ = ts.get_daily_adjusted(
symbol=ns_parser.s_ticker, outputsize="full"
)
df_stock_candidate.columns = [
val.split(". ")[1].capitalize()
for val in df_stock_candidate.columns
]
df_stock_candidate = df_stock_candidate.rename(
columns={
"Adjusted close": "Adj Close",
}
)
# Check that loading a stock was not successful
# pylint: disable=no-member
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
# pylint: disable=no-member
df_stock_candidate.sort_index(ascending=True, inplace=True)
# Slice dataframe from the starting date YYYY-MM-DD selected
df_stock_candidate = df_stock_candidate[ns_parser.s_start_date :]
# Yahoo Finance Source
elif ns_parser.source == "yf":
df_stock_candidate = yf.download(
ns_parser.s_ticker, start=ns_parser.s_start_date, progress=False
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
df_stock_candidate.index.name = "date"
# IEX Cloud Source
elif ns_parser.source == "iex":
client = pyEX.Client(api_token=cfg.API_IEX_TOKEN, version="v1")
df_stock_candidate = client.chartDF(ns_parser.s_ticker)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
df_stock_candidate = df_stock_candidate[
["uClose", "uHigh", "uLow", "uOpen", "fClose", "volume"]
]
df_stock_candidate = df_stock_candidate.rename(
columns={
"uClose": "Close",
"uHigh": "High",
"uLow": "Low",
"uOpen": "Open",
"fClose": "Adj Close",
"volume": "Volume",
}
)
df_stock_candidate.sort_index(ascending=True, inplace=True)
# Slice dataframe from the starting date YYYY-MM-DD selected
df_stock_candidate = df_stock_candidate[ns_parser.s_start_date :]
# Check if start time from dataframe is more recent than specified
if df_stock_candidate.index[0] > pd.to_datetime(ns_parser.s_start_date):
s_start = df_stock_candidate.index[0]
else:
s_start = ns_parser.s_start_date
elif ns_parser.source == "av":
ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
# pylint: disable=unbalanced-tuple-unpacking
df_stock_candidate, _ = ts.get_intraday(
symbol=ns_parser.s_ticker,
outputsize="full",
interval=str(ns_parser.n_interval) + "min",
)
df_stock_candidate.columns = [
val.split(". ")[1].capitalize() for val in df_stock_candidate.columns
]
df_stock_candidate = df_stock_candidate.rename(
columns={
"Adjusted close": "Adj Close",
}
)
s_interval = str(ns_parser.n_interval) + "min"
# Check that loading a stock was not successful
# pylint: disable=no-member
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
# pylint: disable=no-member
df_stock_candidate.sort_index(ascending=True, inplace=True)
# Slice dataframe from the starting date YYYY-MM-DD selected
df_stock_candidate = df_stock_candidate[ns_parser.s_start_date :]
# Check if start time from dataframe is more recent than specified
if df_stock_candidate.index[0] > pd.to_datetime(ns_parser.s_start_date):
s_start = df_stock_candidate.index[0]
else:
s_start = ns_parser.s_start_date
elif ns_parser.source == "yf":
s_int = str(ns_parser.n_interval) + "m"
s_interval = s_int + "in"
d_granularity = {"1m": 6, "5m": 59, "15m": 59, "30m": 59, "60m": 729}
s_start_dt = datetime.utcnow() - timedelta(days=d_granularity[s_int])
s_date_start = s_start_dt.strftime("%Y-%m-%d")
if s_start_dt > ns_parser.s_start_date:
# Using Yahoo Finance with granularity {s_int} the starting date is set to: {s_date_start}
df_stock_candidate = yf.download(
ns_parser.s_ticker,
start=s_date_start,
progress=False,
interval=s_int,
prepost=ns_parser.b_prepost,
)
else:
df_stock_candidate = yf.download(
ns_parser.s_ticker,
start=ns_parser.s_start_date.strftime("%Y-%m-%d"),
progress=False,
interval=s_int,
prepost=ns_parser.b_prepost,
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
if s_start_dt > ns_parser.s_start_date:
s_start = pytz.utc.localize(s_start_dt)
else:
s_start = ns_parser.s_start_date
df_stock_candidate.index.name = "date"
elif ns_parser.source == "iex":
s_interval = str(ns_parser.n_interval) + "min"
client = pyEX.Client(api_token=cfg.API_IEX_TOKEN, version="v1")
df_stock_candidate = client.chartDF(ns_parser.s_ticker)
df_stock_candidate = client.intradayDF(ns_parser.s_ticker).iloc[
0 :: ns_parser.n_interval
]
df_stock_candidate = df_stock_candidate[
["close", "high", "low", "open", "volume", "close"]
]
df_stock_candidate.columns = [
x.capitalize() for x in df_stock_candidate.columns
]
df_stock_candidate.columns = list(df_stock_candidate.columns[:-1]) + [
"Adj Close"
]
df_stock_candidate.sort_index(ascending=True, inplace=True)
new_index = []
for idx in range(len(df_stock_candidate)):
dt_time = datetime.strptime(df_stock_candidate.index[idx][1], "%H:%M")
new_index.append(
df_stock_candidate.index[idx][0]
+ timedelta(hours=dt_time.hour, minutes=dt_time.minute)
)
df_stock_candidate.index =
|
pd.DatetimeIndex(new_index)
|
pandas.DatetimeIndex
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 16:31:58 2021
@author: snoone
"""
import os
import glob
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
OUTDIR = "D:/Python_CDM_conversion/hourly/qff/cdm_out/observations_table"
os.chdir("D:/Python_CDM_conversion/hourly/qff/test")
extension = 'qff'
#my_file = open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r")
#all_filenames = my_file.readlines()
#print(all_filenames)
##use a list of file names to run 5000 in parallel
#with open("D:/Python_CDM_conversion/hourly/qff/ls.txt", "r") as f:
# all_filenames = f.read().splitlines()
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
##to start at the beginning of the files
for filename in all_filenames:
##to start at the next file after the last one processed
#for filename in all_filenames[all_filenames.index('SWM00002338.qff'):] :
df=pd.read_csv(filename, sep="|")
    ##set up master df to extract each variable
df["report_id"]=""
df["observation_id"]=""
df["data_policy_licence"]=""
df["date_time_meaning"]="1"
df["observation_duration"]="0"
df["latitude"]=df["Latitude"]
df["longitude"]=df["Longitude"]
df["crs"]=""
df["z_coordinate"]=""
df["z_coordinate_type"]=""
df["observation_height_above_station_surface"]=""
df["observed_variable"]=""
df["secondary_variable"]=""
df["observation_value"]=""
df["value_significance"]="12"
df["secondary_value"]=""
df["units"]=""
df["code_table"]=""
df["conversion_flag"]=""
df["location_method"]=""
df["location_precision"]=""
df["z_coordinate_method"]=""
df["bbox_min_longitude"]=""
df["bbox_max_longitude"]=""
df["bbox_min_latitude"]=""
df["bbox_max_latitude"]=""
df["spatial_representativeness"]=""
df["original_code_table"]=""
df["quality_flag"]=""
df["numerical_precision"]=""
df["sensor_id"]=""
df["sensor_automation_status"]=""
df["exposure_of_sensor"]=""
df["original_precision"]=""
df["original_units"]=""
df["original_code_table"]=""
df["original_value"]=""
df["conversion_method"]=""
df["processing_code"]=""
df["processing_level"]="0"
df["adjustment_id"]=""
df["traceability"]=""
df["advanced_qc"]=""
df["advanced_uncertainty"]=""
df["advanced_homogenisation"]=""
df["advanced_assimilation_feedback"]=""
df["source_id"]=""
df["source_record_id"]=""
df["primary_station_id"]=df["Station_ID"]
df["Timestamp2"] = df["Year"].map(str) + "-" + df["Month"].map(str)+ "-" + df["Day"].map(str)
df["Seconds"]="00"
df["offset"]="+00"
df["date_time"] = df["Timestamp2"].map(str)+ " " + df["Hour"].map(str)+":"+df["Minute"].map(str)+":"+df["Seconds"].map(str)
    df['date_time'] = pd.to_datetime(df['date_time'], format='%Y-%m-%d %H:%M:%S')
df['date_time'] = df['date_time'].astype('str')
df.date_time = df.date_time + '+00'
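    # after parsing, date_time holds ISO-style strings ("YYYY-MM-DD HH:MM:SS") with "+00" appended as a literal UTC offset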
#=========================================================================================
    ##convert temperature (the block below is repeated for each variable)
dft = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dft["observation_value"]=df["temperature"]+273.15
dft["source_id"]=df["temperature_Source_Code"]
dft["Seconds"]="00"
dft["quality_flag"]=df["temperature_QC_flag"]
dft["qc_method"]=dft["quality_flag"]
dft["conversion_flag"]="0"
dft["conversion_method"]="1"
dft["numerical_precision"]="0.01"
dft["original_precision"]="0.1"
dft["original_units"]="60"
dft["original_value"]=df["temperature"]
dft["observation_height_above_station_surface"]="2"
dft["units"]="5"
dft["observed_variable"]="85"
    ##set quality flag from the df master for this variable: non-null flags become 1, missing flags are filled with Null and then set to 0
dft.loc[dft['quality_flag'].notnull(), "quality_flag"] = 1
dft = dft.fillna("Null")
dft.quality_flag[dft.quality_flag == "Null"] = 0
#change for each variable if required
    ##remove unwanted missing data rows
dft = dft.fillna("null")
dft = dft.replace({"null":"-99999"})
dft = dft[dft.observation_value != -99999]
#df = df.astype(str)
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['primary_station_id_2']=dft['primary_station_id'].astype(str)+'-'+dft['source_id'].astype(str)
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
#dft.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dft = dft.astype(str)
df2 = df2.astype(str)
dft= df2.merge(dft, on=['primary_station_id_2'])
dft['data_policy_licence'] = dft['data_policy_licence_x']
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft['observation_id']=dft['primary_station_id'].astype(str)+'-'+dft['record_number'].astype(str)+'-'+dft['date_time'].astype(str)
dft['observation_id'] = dft['observation_id'].str.replace(r' ', '-')
    ##remove unwanted last two characters
dft['observation_id'] = dft['observation_id'].str[:-6]
dft["observation_id"]=dft["observation_id"]+'-'+dft['observed_variable'].astype(str)+'-'+dft['value_significance'].astype(str)
dft["report_id"]=dft["observation_id"].str[:-6]
##set up qc table
dft = dft[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
    dft.dropna(subset = ["observation_value"], inplace=True)
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
dft["observation_value"]= dft["observation_value"].round(2)
#dft.to_csv("isuest.csv", index=False, sep=",")
#=================================================================================
    ##convert dew point temperature (the block below is repeated for each variable)
dfdpt= df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfdpt["observation_value"]=df["dew_point_temperature"]+273.15
dfdpt["source_id"]=df["dew_point_temperature_Source_Code"]
dfdpt["Seconds"]="00"
dfdpt["quality_flag"]=df["dew_point_temperature_QC_flag"]
dfdpt["conversion_flag"]="0"
dfdpt["conversion_method"]="1"
dfdpt["numerical_precision"]="0.01"
dfdpt["original_precision"]="0.1"
dfdpt["original_units"]="60"
dfdpt["original_value"]=df["dew_point_temperature"]
dfdpt["observation_height_above_station_surface"]="2"
dfdpt["units"]="5"
dfdpt["observed_variable"]="36"
    ##set quality flag from the df master for this variable: non-null flags become 1, missing flags are filled with Null and then set to 0
dfdpt.loc[dfdpt['quality_flag'].notnull(), "quality_flag"] = 1
dfdpt= dfdpt.fillna("Null")
dfdpt.quality_flag[dfdpt.quality_flag == "Null"] = 0
    ##remove unwanted missing data rows
dfdpt= dfdpt.fillna("null")
dfdpt= dfdpt.replace({"null":"-99999"})
dfdpt= dfdpt[dfdpt.observation_value != -99999]
#df = df.astype(str)
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['primary_station_id_2']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['source_id'].astype(str)
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
#dfdpt.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dfdpt= dfdpt.astype(str)
df2 = df2.astype(str)
dfdpt= df2.merge(dfdpt, on=['primary_station_id_2'])
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence_x']
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['observation_id']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['record_number'].astype(str)+'-'+dfdpt['date_time'].astype(str)
dfdpt['observation_id'] = dfdpt['observation_id'].str.replace(r' ', '-')
    ##remove unwanted last two characters
dfdpt['observation_id'] = dfdpt['observation_id'].str[:-6]
dfdpt["observation_id"]=dfdpt["observation_id"]+'-'+dfdpt['observed_variable'].astype(str)+'-'+dfdpt['value_significance'].astype(str)
dfdpt["report_id"]=dfdpt["observation_id"].str[:-6]
##set up qc table
dfdpt= dfdpt[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
dfdpt.dropna(subset = ["observation_value"], inplace=True)
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
dfdpt["observation_value"]= dfdpt["observation_value"].round(2)
#====================================================================================
    #convert station level pressure to cdmlite
dfslp = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfslp["observation_value"]=df["station_level_pressure"]
dfslp["source_id"]=df["station_level_pressure_Source_Code"]
dfslp["Seconds"]="00"
dfslp["quality_flag"]=df["station_level_pressure_QC_flag"]
dfslp["conversion_flag"]="0"
dfslp["conversion_method"]="7"
dfslp["numerical_precision"]="10"
dfslp["original_precision"]="0.1"
dfslp["original_units"]="530"
dfslp["original_value"]=df["station_level_pressure"]
dfslp["observation_height_above_station_surface"]="2"
dfslp["units"]="32"
dfslp["observed_variable"]="57"
    ##set quality flag from the df master for this variable: non-null flags become 1, missing flags are filled with Null and then set to 0
dfslp.loc[dfslp['quality_flag'].notnull(), "quality_flag"] = 1
dfslp = dfslp.fillna("Null")
dfslp.quality_flag[dfslp.quality_flag == "Null"] = 0
#change for each variable if required
    ##remove unwanted missing data rows
dfslp = dfslp.fillna("null")
dfslp = dfslp.replace({"null":"-99999"})
dfslp = dfslp[dfslp.observation_value != -99999]
#df = df.astype(str)
dfslp["source_id"] = pd.to_numeric(dfslp["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfslp['source_id'] = dfslp['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['primary_station_id_2']=dfslp['primary_station_id'].astype(str)+'-'+dfslp['source_id'].astype(str)
dfslp["observation_value"] = pd.to_numeric(dfslp["observation_value"],errors='coerce')
#dfslp.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dfslp = dfslp.astype(str)
df2 = df2.astype(str)
dfslp= df2.merge(dfslp, on=['primary_station_id_2'])
dfslp['data_policy_licence'] = dfslp['data_policy_licence_x']
dfslp['data_policy_licence'] = dfslp['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['observation_id']=dfslp['primary_station_id'].astype(str)+'-'+dfslp['record_number'].astype(str)+'-'+dfslp['date_time'].astype(str)
dfslp['observation_id'] = dfslp['observation_id'].str.replace(r' ', '-')
    ##remove unwanted last two characters
dfslp['observation_id'] = dfslp['observation_id'].str[:-6]
dfslp["observation_id"]=dfslp["observation_id"]+'-'+dfslp['observed_variable'].astype(str)+'-'+dfslp['value_significance'].astype(str)
dfslp["report_id"]=dfslp["observation_id"].str[:-6]
##set up qc table
dfslp = dfslp[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
    ##make sure there are no decimal places and round the value to the required precision
dfslp['observation_value'] = dfslp['observation_value'].map(float)
dfslp['observation_value'] = (dfslp['observation_value']*100)
dfslp['observation_value'] = dfslp['observation_value'].map(int)
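    # the *100 above rescales the pressure (presumably hPa to Pa) so it can be stored as an integer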
dfslp['source_id'] = dfslp['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['data_policy_licence'] = dfslp['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp["source_id"] = pd.to_numeric(dfslp["source_id"],errors='coerce')
dfslp['observation_value'] = dfslp['observation_value'].astype(str).apply(lambda x: x.replace('.0',''))
#dfslp.to_csv("slp.csv", index=False, sep=",")
#===========================================================================================
    #convert sea level pressure to cdmlite
dfmslp = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfmslp["observation_value"]=df["sea_level_pressure"]
dfmslp["source_id"]=df["sea_level_pressure_Source_Code"]
dfmslp["Seconds"]="00"
dfmslp["quality_flag"]=df["sea_level_pressure_QC_flag"]
dfmslp["conversion_flag"]="0"
dfmslp["conversion_method"]="7"
dfmslp["numerical_precision"]="10"
dfmslp["original_precision"]="0.1"
dfmslp["original_units"]="530"
dfmslp["original_value"]=df["temperature"]
dfmslp["observation_height_above_station_surface"]="2"
dfmslp["units"]="32"
dfmslp["observed_variable"]="58"
    ##set quality flag from the df master for this variable: non-null flags become 1, missing flags are filled with Null and then set to 0
dfmslp.loc[dfmslp['quality_flag'].notnull(), "quality_flag"] = 1
    dfmslp = dfmslp.fillna("Null")
dfmslp.quality_flag[dfmslp.quality_flag == "Null"] = 0
#change for each variable if required
    ##remove unwanted missing data rows
dfmslp = dfmslp.fillna("null")
dfmslp = dfmslp.replace({"null":"-99999"})
dfmslp = dfmslp[dfmslp.observation_value != -99999]
#df = df.astype(str)
dfmslp["source_id"] =
|
pd.to_numeric(dfmslp["source_id"],errors='coerce')
|
pandas.to_numeric
|
import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pytest
from test_pvsystem import sam_data
from conftest import requires_scipy
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture()
def location():
return Location(32.2, -111, altitude=700)
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the || accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
@requires_scipy
def test_run_model_with_weather(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'wind_speed':5, 'temp_air':10}, index=times)
ac = mc.run_model(times, weather=weather).ac
expected = pd.Series(np.array([ 201.691634921, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
@requires_scipy
def test_run_model_tracker(system, location):
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([119.067713606, nan]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
expect = pd.DataFrame(np.
array([[ 54.82513187, 90. , 11.0039221 , 11.0039221 ],
[ nan, 0. , 0. , nan]]),
columns=['aoi', 'surface_azimuth', 'surface_tilt', 'tracker_theta'],
index=times)
expect = expect[['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt']]
assert_frame_equal(mc.tracking, expect, check_less_precise=2)
def poadc(mc):
mc.dc = mc.total_irrad['poa_global'] * 0.2
mc.dc.name = None # assert_series_equal will fail without this
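# poadc above acts as a minimal user-supplied DC model for ModelChain: it simply takes 20% of
# the plane-of-array irradiance as the DC output instead of running a module model.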
@requires_scipy
@pytest.mark.parametrize('dc_model, expected', [
('sapm', [181.604438144, -2.00000000e-02]),
('singlediode', [181.044109596, -2.00000000e-02]),
('pvwatts', [190.028186986, 0]),
(poadc, [189.183065667, 0]) # user supplied function
])
def test_dc_models(system, cec_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system,
location, dc_model, expected):
dc_systems = {'sapm': system, 'singlediode': cec_dc_snl_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
poadc: pvwatts_dc_pvwatts_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location, dc_model=dc_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
@requires_scipy
@pytest.mark.parametrize('dc_model', ['sapm', 'singlediode', 'pvwatts_dc'])
def test_infer_dc_model(system, cec_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location, dc_model,
mocker):
dc_systems = {'sapm': system, 'singlediode': cec_dc_snl_ac_system,
'pvwatts_dc': pvwatts_dc_pvwatts_ac_system}
system = dc_systems[dc_model]
m = mocker.spy(system, dc_model)
mc = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
mc.run_model(times)
assert m.call_count == 1
assert isinstance(mc.dc, (pd.Series, pd.DataFrame))
def acdc(mc):
mc.ac = mc.dc
@requires_scipy
@pytest.mark.parametrize('ac_model, expected', [
('snlinverter', [181.604438144, -2.00000000e-02]),
('adrinverter', [np.nan, -25.00000000e-02]),
('pvwatts', [190.028186986, 0]),
(acdc, [199.845296258, 0]) # user supplied function
])
def test_ac_models(system, cec_dc_adr_ac_system, pvwatts_dc_pvwatts_ac_system,
location, ac_model, expected):
ac_systems = {'snlinverter': system, 'adrinverter': cec_dc_adr_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
acdc: pvwatts_dc_pvwatts_ac_system}
system = ac_systems[ac_model]
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def constant_aoi_loss(mc):
mc.aoi_modifier = 0.9
@requires_scipy
@pytest.mark.parametrize('aoi_model, expected', [
('sapm', [182.784057666, -2.00000000e-02]),
('ashrae', [180.825930547, -2.00000000e-02]),
('physical', [181.453077805, -2.00000000e-02]),
('no_loss', [181.604438144, -2.00000000e-02]),
(constant_aoi_loss, [164.997043305, -2e-2])
])
def test_aoi_models(system, location, aoi_model, expected):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model=aoi_model, spectral_model='no_loss')
times =
completion: pd.date_range('20160101 1200-0700', periods=2, freq='6H')
api: pandas.date_range
"""
for f in sorted(glob('*.py')):
# print(f'nohup python -u {f} 0 > LOG/log_{f}.txt &')
print(f'python -u {f} > LOG/log_{f}.txt')
"""
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
from glob import glob
import os
from socket import gethostname
HOSTNAME = gethostname()
from tqdm import tqdm
#from itertools import combinations
from sklearn.model_selection import KFold
from time import time, sleep
from datetime import datetime
from multiprocessing import cpu_count, Pool
import gc
# =============================================================================
# global variables
# =============================================================================
COMPETITION_NAME = 'santander-customer-transaction-prediction'
IMP_FILE = 'LOG/xxx.csv'
IMP_FILE_BEST = 'LOG/xxx.csv'
SUB_BEST = '../output/0328-1.csv.gz'
# =============================================================================
# def
# =============================================================================
def start(fname):
global st_time
st_time = time()
print("""
#==============================================================================
# START!!! {} PID: {} time: {}
#==============================================================================
""".format( fname, os.getpid(), datetime.today() ))
send_line(f'{HOSTNAME} START {fname} time: {elapsed_minute():.2f}min')
return
def reset_time():
global st_time
st_time = time()
return
def end(fname):
print("""
#==============================================================================
# SUCCESS !!! {}
#==============================================================================
""".format(fname))
print('time: {:.2f}min'.format( elapsed_minute() ))
send_line(f'{HOSTNAME} FINISH {fname} time: {elapsed_minute():.2f}min')
return
def elapsed_minute():
return (time() - st_time)/60
def mkdir_p(path):
try:
os.stat(path)
except:
os.mkdir(path)
def to_feature(df, path):
if df.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { df.columns[df.columns.duplicated()] }')
df.reset_index(inplace=True, drop=True)
df.columns = [c.replace('/', '-').replace(' ', '-') for c in df.columns]
for c in df.columns:
df[[c]].to_feather(f'{path}_{c}.f')
return
def to_pickles(df, path, split_size=3, inplace=True):
"""
path = '../output/mydf'
    writes '../output/mydf/000.p'
           '../output/mydf/001.p'
           '../output/mydf/002.p'
    (see the illustrative round-trip sketch after read_pickles below)
"""
print(f'shape: {df.shape}')
if inplace==True:
df.reset_index(drop=True, inplace=True)
else:
df = df.reset_index(drop=True)
gc.collect()
mkdir_p(path)
kf = KFold(n_splits=split_size)
for i, (train_index, val_index) in enumerate(tqdm(kf.split(df))):
df.iloc[val_index].to_pickle(f'{path}/{i:03d}.p')
return
def read_pickles(path, col=None, use_tqdm=True):
if col is None:
if use_tqdm:
df = pd.concat([ pd.read_pickle(f) for f in tqdm(sorted(glob(path+'/*'))) ])
else:
print(f'reading {path}')
df = pd.concat([ pd.read_pickle(f) for f in sorted(glob(path+'/*')) ])
else:
df = pd.concat([ pd.read_pickle(f)[col] for f in tqdm(sorted(glob(path+'/*'))) ])
return df
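# --- Illustrative sketch (not part of the original utilities) -----------------
# Round-trips a small frame through to_pickles/read_pickles defined above.
# The output path below is hypothetical and only used for this demo.
def _example_pickle_roundtrip(path='../output/example_df'):
    df_demo = pd.DataFrame({'var_0': np.arange(9), 'target': 0})
    to_pickles(df_demo, path, split_size=3)        # writes 000.p, 001.p, 002.p under path
    df_back = read_pickles(path, use_tqdm=False)   # concatenates the parts back into one frame
    assert len(df_back) == len(df_demo)
    return df_back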
def to_pkl_gzip(df, path):
df.to_pickle(path)
os.system('rm ' + path + '.gz')
os.system('gzip ' + path)
return
def save_test_features(df):
for c in df.columns:
df[[c]].to_pickle(f'../feature/test_{c}.pkl')
return
# =============================================================================
#
# =============================================================================
def get_dummies(df):
"""
binary would be drop_first
"""
col = df.select_dtypes('O').columns.tolist()
nunique = df[col].nunique()
col_binary = nunique[nunique==2].index.tolist()
[col.remove(c) for c in col_binary]
df = pd.get_dummies(df, columns=col)
df =
completion: pd.get_dummies(df, columns=col_binary, drop_first=True)
api: pandas.get_dummies
import slack
from flask import Response
import pandas as pd
import numpy as np
from numpy import nan
import re
import os
import networkx as nx
from pyvis.network import Network
from dotenv import load_dotenv, dotenv_values
from statsmodels.tsa.arima.model import ARIMA
# load environment variables
# config = dotenv_values(".env")
load_dotenv()
SLACK_TOKEN = os.getenv('SLACK_TOKEN')
# define slack client
client = slack.WebClient(token=SLACK_TOKEN)
# function to retrieve the display name of the user based on user id
def get_name(user_id):
try:
out = client.users_info(user=user_id)["user"]["profile"]["real_name"]
except:
out = None
return out
# function to get the channels that a user is active in
def get_user_channels(user_id):
return client.users_conversations(user=user_id)["channels"]
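# --- Illustrative sketch (not part of the original module) ---
# Combines the two helpers above: look up a user's display name and the names of
# the channels they are active in. The user id below is a placeholder.
def _example_user_summary(user_id="U00000000"):
    name = get_name(user_id)
    channel_names = [channel["name"] for channel in get_user_channels(user_id)]
    return name, channel_names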
# send response message to user
def send_response_message(user_id):
# define message to be posted
message = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': (
":sparkles: Hey, check out our latest analysis of your team here: <https://network-analysis.azurewebsites.net|LINK> :sparkles:"
)
}
}
client.chat_postMessage(channel=user_id, blocks=[message])
# function used to retrieve network analysis data for a specific channel
def get_slack_data(user_id, text):
# define channel id
try:
channel_id = [channel["id"] for channel in get_user_channels(user_id) if channel["name"] == text][0]
except:
channel_id = "C01T6GGTBQD"
# get channel history
result = client.conversations_history(channel=channel_id, limit=1000)
# retrieve messages
conversation_history = result["messages"]
# create DataFrame
messages = pd.DataFrame(conversation_history)
# add channel id to df
messages["user_id"] = str(user_id)
# convert timestamp to datetime object
messages['date'] =
completion: pd.to_datetime(messages['ts'], unit="s")
api: pandas.to_datetime
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime as dt
from collections import defaultdict
from dateutil.relativedelta import relativedelta
def collect_dates_for_cohort(df_pop, control_reservoir, control_dates, col_names=None):
'''
Fill 'control_used' dictionary with the dates (specified in 'control_dates') of each person
(represented by their CPF) regarding the main events considered in the analysis.
Args:
df_pop:
pandas.DataFrame.
control_reservoir:
collections.defaultdict.
control_used:
collections.defaultdict.
control_dates:
collections.defaultdict.
col_names:
dictionary.
Return:
None.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)",
"HOSPITALIZACAO COVID": "DATA HOSPITALIZACAO",
}
for j in tqdm(range(df_pop.shape[0])):
cpf = df_pop["CPF"].iat[j]
sex, age = df_pop["SEXO"].iat[j], df_pop["IDADE"].iat[j]
# Different outcomes' dates
dt_d1 = df_pop[col_names["D1"]].iat[j]
dt_d2 = df_pop[col_names["D2"]].iat[j]
dt_death = df_pop[col_names["OBITO COVID"]].iat[j]
dt_death_general = df_pop[col_names["OBITO GERAL"]].iat[j]
dt_hosp_covid = df_pop[col_names["HOSPITALIZACAO COVID"]].iat[j]
control_reservoir[(age,sex)].append(cpf)
if pd.notna(dt_d1):
control_dates["D1"][cpf] = dt_d1
if pd.notna(dt_d2):
control_dates["D2"][cpf] = dt_d2
if pd.notna(dt_death):
control_dates["DEATH COVID"][cpf] = dt_death
if pd.notna(dt_death_general):
control_dates["DEATH GENERAL"][cpf] = dt_death_general
if pd.notna(dt_hosp_covid):
control_dates["HOSPITALIZATION COVID"][cpf] = dt_hosp_covid
def rearrange_controls(control_reservoir, seed):
'''
Shuffle the order of the controls in the structure containing all
control candidates.
Args:
control_reservoir:
collections.defaultdict.
seed:
Integer.
Return:
None.
'''
np.random.seed(seed)
for key in control_reservoir.keys():
np.random.shuffle(control_reservoir[key])
def perform_matching(datelst, df_vac, control_reservoir, control_used, control_dates, col_names):
'''
        For each date in 'datelst', pair every person vaccinated on that date
        (case) with an unvaccinated control of the same age and sex drawn from
        'control_reservoir'. A hedged sketch of the find_pair contract follows
        this function.
Args:
datelst:
List of datetime.date.
df_vac:
pandas.DataFrame.
control_reservoir:
collections.defaultdict.
control_used:
collections.defaultdict.
control_dates:
collections.defaultdict.
col_names:
dictionary.
Return:
pareados:
pandas.DataFrame.
matched:
dictionary.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
matchings = defaultdict(lambda:-1)
matched = defaultdict(lambda:False)
for current_date in tqdm(datelst):
# Select all people who was vaccinated at the current date
df_vac["compare_date"] = df_vac[col_names["D1"]].apply(lambda x: True if x==current_date else False)
current_vaccinated = df_vac[df_vac["compare_date"]==True]
cpf_list = current_vaccinated["CPF"].tolist()
age_list = current_vaccinated["IDADE"].tolist()
sex_list = current_vaccinated["SEXO"].tolist()
# For each person vaccinated at the current date, check if there is a control for he/she.
for j in range(0, len(cpf_list)):
pair = find_pair(current_date, age_list[j], sex_list[j], control_reservoir, control_used, control_dates)
if pair!=-1:
matchings[cpf_list[j]] = pair
items_matching = matchings.items()
pareados = pd.DataFrame({"CPF CASO": [ x[0] for x in items_matching ], "CPF CONTROLE": [ x[1] for x in items_matching ]})
for cpf in [ x[0] for x in items_matching ]+[ x[1] for x in items_matching ]:
matched[cpf]=True
return pareados, matched
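# --- Illustrative sketch --------------------------------------------------------
# find_pair is defined elsewhere in the pipeline and is not shown in this file.
# The helper below is only a hedged guess at its contract, based on how it is
# called above: pick an unused control with the same (age, sex) who was still
# unvaccinated on the case's D1 date, mark it as used, and return its CPF
# (or -1 when no eligible control exists). Dates are assumed to be comparable
# datetime.date objects.
def _example_find_pair(current_date, age, sex, control_reservoir, control_used, control_dates):
    for cpf in control_reservoir[(age, sex)]:
        if control_used[cpf]:
            continue
        d1 = control_dates["D1"].get(cpf, np.nan)
        if pd.isna(d1) or d1 > current_date:
            control_used[cpf] = True
            return cpf
    return -1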
def get_events(df_pop, pareados, matched, col_names):
'''
        Build a per-person table of event dates (D1, D2, Covid death, general
        death) for matched cases, their controls and unmatched individuals.
Args:
df_pop:
pareados:
matched:
col_names:
Return:
datas:
pandas.DataFrame.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
data_obito = defaultdict(lambda:np.nan)
data_obito_geral = defaultdict(lambda:np.nan)
data_d1 = defaultdict(lambda:np.nan)
data_d2 = defaultdict(lambda:np.nan)
for j in range(df_pop.shape[0]):
cpf = df_pop["CPF"].iat[j]
d1_dt = df_pop[col_names["D1"]].iat[j]
d2_dt = df_pop[col_names["D2"]].iat[j]
obito = df_pop[col_names["OBITO COVID"]].iat[j]
obito_geral = df_pop[col_names["OBITO GERAL"]].iat[j]
#teste = df_pop["DATA SOLICITACAO(TESTES)"].iat[j]
if not pd.isna(obito):
data_obito[cpf] = obito
elif not pd.isna(obito_geral):
data_obito_geral[cpf] = obito_geral
if not pd.isna(d1_dt):
data_d1[cpf] = d1_dt
if not pd.isna(d2_dt):
data_d2[cpf] = d2_dt
# -- create cols with dates --
datas = {
"CPF": [], "DATA D1": [], "DATA D2": [],
"DATA OBITO COVID": [], "DATA OBITO GERAL": [],
"TIPO": [], "PAR": [], "PAREADO": []
}
print("Criando tabela de eventos ...")
for j in tqdm(range(0, pareados.shape[0])):
cpf_caso = pareados["CPF CASO"].iat[j]
cpf_control = pareados["CPF CONTROLE"].iat[j]
# Fill new columns
datas["CPF"] += [cpf_caso, cpf_control]
datas["DATA D1"] += [data_d1[cpf_caso], data_d1[cpf_control]]
datas["DATA D2"] += [data_d2[cpf_caso], data_d2[cpf_control]]
datas["DATA OBITO COVID"] += [data_obito[cpf_caso], data_obito[cpf_control]]
datas["DATA OBITO GERAL"] += [data_obito_geral[cpf_caso], data_obito_geral[cpf_control]]
#datas["DATA HOSPITALIZACAO"] += [data_hospitalizado[cpf_caso], data_hospitalizado[cpf_control]]
#datas["DATA TESTE"] += [data_teste[cpf_caso], data_teste[cpf_control]]
datas["TIPO"] += ["CASO", "CONTROLE"]
datas["PAR"] += [cpf_control, cpf_caso]
datas["PAREADO"] += [True, True]
print("Criando tabela de eventos ... Concluído")
print("Incluindo não pareados ...")
for j in tqdm(range(df_pop.shape[0])):
cpf = df_pop["CPF"].iat[j]
if matched[cpf]==False:
datas["CPF"] += [cpf]
datas["DATA D1"] += [data_d1[cpf]]
datas["DATA D2"] += [data_d2[cpf]]
datas["DATA OBITO COVID"] += [data_obito[cpf]]
datas["DATA OBITO GERAL"] += [data_obito_geral[cpf]]
#datas["DATA HOSPITALIZACAO"] += [data_hospitalizado[cpf]]
#datas["DATA TESTE"] += [data_teste[cpf]]
datas["TIPO"] += ["NAO PAREADO"]
datas["PAR"] += [np.nan]
datas["PAREADO"] += [False]
print("Incluindo não pareados ... Concluído.")
datas = pd.DataFrame(datas)
return datas
def get_events_per_pair(df_pop, pareados, col_names):
'''
        Build a per-pair table with the event dates (D1, D2, Covid death, general
        death) of each case and its matched control side by side.
        Args:
            df_pop:
            pareados:
col_names:
Return:
datas:
pandas.DataFrame.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
data_obito = defaultdict(lambda:np.nan)
data_obito_geral = defaultdict(lambda:np.nan)
data_d1 = defaultdict(lambda:np.nan)
data_d2 = defaultdict(lambda:np.nan)
for j in range(df_pop.shape[0]):
cpf = df_pop["cpf"].iat[j]
d1_dt = df_pop[col_names["D1"]].iat[j]
d2_dt = df_pop[col_names["D2"]].iat[j]
obito = df_pop[col_names["OBITO COVID"]].iat[j]
obito_geral = df_pop[col_names["OBITO GERAL"]].iat[j]
#teste = df_pop["DATA SOLICITACAO(TESTES)"].iat[j]
if not pd.isna(obito):
data_obito[cpf] = obito
elif not pd.isna(obito_geral):
data_obito_geral[cpf] = obito_geral
if not pd.isna(d1_dt):
data_d1[cpf] = d1_dt
if not pd.isna(d2_dt):
data_d2[cpf] = d2_dt
# -- create cols with dates --
datas = {
"CPF CASO": [], "DATA D1 CASO": [], "DATA D2 CASO": [],
"DATA OBITO COVID CASO": [], "DATA OBITO GERAL CASO": [],
"CPF CONTROLE": [], "DATA D1 CONTROLE": [], "DATA D2 CONTROLE": [],
"DATA OBITO COVID CONTROLE": [], "DATA OBITO GERAL CONTROLE": []
}
print("Criando tabela de eventos por par ...")
for j in tqdm(range(0, pareados.shape[0])):
cpf_caso = pareados["CPF CASO"].iat[j]
cpf_control = pareados["CPF CONTROLE"].iat[j]
# Fill new columns
datas["CPF CASO"] += [cpf_caso]
datas["CPF CONTROLE"] += [cpf_control]
datas["DATA D1 CASO"] += [data_d1[cpf_caso]]
datas["DATA D1 CONTROLE"] += [data_d1[cpf_control]]
datas["DATA D2 CASO"] += [data_d2[cpf_caso]]
datas["DATA D2 CONTROLE"] += [data_d2[cpf_control]]
datas["DATA OBITO COVID CASO"] += [data_obito[cpf_caso]]
datas["DATA OBITO COVID CONTROLE"] += [data_obito[cpf_control]]
datas["DATA OBITO GERAL CASO"] += [data_obito_geral[cpf_caso]]
datas["DATA OBITO GERAL CONTROLE"] += [data_obito_geral[cpf_control]]
print("Criando tabela de eventos por par ... Concluído")
datas = pd.DataFrame(datas)
return datas
def get_intervals_events(events_pair_df, final_cohort, which="D1"):
'''
Calculate the intervals between the start of the pair's cohort and all
possible events for the case and control.
For both case and control individuals, there 4 possible events:
- Death by Covid (Outcome)
- Death due to another cause (Censored)
- Control vaccination (Censored)
- End of the cohort (Censored)
The intervals are calculated for all events, and for the survival analysis
only the earliest event should be considered.
Args:
events_pair_df:
pandas.DataFrame.
        Return:
            events_pair_df:
                pandas.DataFrame.
'''
# Column names translator.
colname = {
"CPF CASO": "CPF CASO", "D1 CASO": "DATA D1 CASO",
"D2 CASO": "DATA D2 CASO", "OBITO CASO": "DATA OBITO GERAL CASO",
"OBITO COVID CASO": "DATA OBITO COVID CASO",
"CPF CONTROLE": "CPF CONTROLE", "D1 CONTROLE": "DATA D1 CONTROLE",
"D2 CONTROLE": "DATA D2 CONTROLE", "OBITO CONTROLE": "DATA OBITO GERAL CONTROLE",
"OBITO COVID CONTROLE": "DATA OBITO COVID CONTROLE",
}
# Calculate intervals for case.
sbst1 = [colname["OBITO COVID CASO"], colname[f"{which} CASO"]]
sbst2 = [colname["OBITO CASO"], colname[f"{which} CASO"]]
events_pair_df[f"INTV OBITO COVID CASO({which})"] = events_pair_df[sbst1].apply(lambda x: calc_interval(x,sbst1), axis=1)
events_pair_df[f"INTV OBITO GERAL CASO({which})"] = events_pair_df[sbst2].apply(lambda x: calc_interval(x,sbst2), axis=1)
sbst_d1d2 = [colname[f"D2 CASO"], colname[f"D1 CASO"]]
if which=="D1":
events_pair_df[f"INTV D2-D1 CASO"] = events_pair_df[sbst_d1d2].apply(lambda x: calc_interval(x, sbst_d1d2), axis=1)
# Calculate intervals for control
sbst1 = [colname["OBITO COVID CONTROLE"], colname[f"{which} CASO"]]
sbst2 = [colname["OBITO CONTROLE"], colname[f"{which} CASO"]]
events_pair_df[f"INTV OBITO COVID CONTROLE({which})"] = events_pair_df[sbst1].apply(lambda x: calc_interval(x,sbst1), axis=1)
events_pair_df[f"INTV OBITO GERAL CONTROLE({which})"] = events_pair_df[sbst2].apply(lambda x: calc_interval(x,sbst2), axis=1)
sbst_d1d2 = [colname[f"D2 CONTROLE"], colname[f"D1 CONTROLE"]]
if which=="D1":
events_pair_df[f"INTV D2-D1 CONTROLE"] = events_pair_df[sbst_d1d2].apply(lambda x: calc_interval(x, sbst_d1d2), axis=1)
# Interval in common for both individuals
sbst_d1 = [colname["D1 CONTROLE"], colname[f"{which} CASO"]]
events_pair_df[f"INTV D1 CASO CONTROLE({which})"] = events_pair_df[sbst_d1].apply(lambda x: calc_interval(x,sbst_d1), axis=1)
events_pair_df[f"INTV FIM COORTE({which})"] = events_pair_df[colname[f"{which} CASO"]].apply(lambda x: (final_cohort-x.date()).days if not pd.isna(x) else np.nan)
return events_pair_df
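# --- Worked example (not part of the original module) --------------------------
# Illustrates the kind of day-count differences computed above, assuming
# which="D1" and final_cohort = 2021-08-31; the dates are made up.
def _example_interval_arithmetic():
    case_d1 = dt.date(2021, 3, 10)
    control_d1 = dt.date(2021, 5, 1)
    case_covid_death = dt.date(2021, 4, 9)
    final_cohort = dt.date(2021, 8, 31)
    intv_d1_caso_controle = (control_d1 - case_d1).days        # 52 days
    intv_obito_covid_caso = (case_covid_death - case_d1).days  # 30 days
    intv_fim_coorte = (final_cohort - case_d1).days            # 174 days
    return intv_d1_caso_controle, intv_obito_covid_caso, intv_fim_coorte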
def get_intervals(events_pair_df, final_cohort=dt.date(2021, 8, 31)):
'''
        For each matched pair, compute the time from the case's D1 to the earliest
        of: control vaccination, Covid death, death from another cause, or the end
        of the cohort, and flag whether the Covid-death outcome was observed.
        Args:
            events_pair_df:
pandas.DataFrame.
Return:
data:
pandas.DataFrame.
'''
colname = {
"CPF CASO": "CPF CASO",
"D1 CASO": "DATA D1 CASO",
"D2 CASO": "DATA D2 CASO",
"OBITO CASO": "DATA OBITO GERAL CASO",
"OBITO COVID CASO": "DATA OBITO COVID CASO",
"CPF CONTROLE": "CPF CONTROLE",
"D1 CONTROLE": "DATA D1 CONTROLE",
"D2 CONTROLE": "DATA D2 CONTROLE",
"OBITO CONTROLE": "DATA OBITO GERAL CONTROLE",
"OBITO COVID CONTROLE": "DATA OBITO COVID CONTROLE",
}
data = {
"CPF": [], "DATA D1": [], "DATA D2": [], "DATA OBITO COVID": [],
"DATA OBITO GERAL": [], "TIPO": [], "PAR": [], "PAREADO": [],
"OBITO COVID DURACAO": [], "COM DESFECHO - OBITO COVID": []
}
# --> Go through each pair
for j in tqdm(range(events_pair_df.shape[0])):
cpf_caso = events_pair_df[colname["CPF CASO"]].iat[j]
cpf_controle = events_pair_df[colname["CPF CONTROLE"]].iat[j]
d2_caso = events_pair_df[colname["D2 CASO"]].iat[j]
d2_controle = events_pair_df[colname["D2 CONTROLE"]].iat[j]
init = events_pair_df[colname["D1 CASO"]].iat[j].date()
events_caso = {
"OBITO CASO": events_pair_df[colname["OBITO CASO"]].iat[j],
"OBITO COVID CASO": events_pair_df[colname["OBITO COVID CASO"]].iat[j],
"COORTE FINAL": final_cohort
}
events_controle = {
"D1 CONTROLE": events_pair_df[colname["D1 CONTROLE"]].iat[j],
"OBITO CONTROLE": events_pair_df[colname["OBITO CONTROLE"]].iat[j],
"OBITO COVID CONTROLE": events_pair_df[colname["OBITO COVID CONTROLE"]].iat[j],
"COORTE FINAL": final_cohort
}
# Convert date strings to date formats.
for key in events_caso.keys():
if not pd.isna(events_caso[key]) and type(events_caso[key])!=dt.date:
events_caso[key] = events_caso[key].date()
for key in events_controle.keys():
if not pd.isna(events_controle[key]) and type(events_controle[key])!=dt.date:
events_controle[key] = events_controle[key].date()
# Determine final day of each person of the pair.
# --> For case:
timeline_namecaso = ["D1 CONTROLE", "OBITO COVID CASO", "OBITO CASO", "COORTE FINAL"]
timeline_caso = [events_controle["D1 CONTROLE"], events_caso["OBITO COVID CASO"],
events_caso["OBITO CASO"], events_caso["COORTE FINAL"]]
# replace NaN for any date later than "COORTE FINAL"
timeline_caso = [x if not pd.isna(x) else dt.date(2050, 1, 1) for x in timeline_caso ]
sorted_tp_caso = sorted(zip(timeline_caso, timeline_namecaso))
final_namecaso = sorted_tp_caso[0][1]
final_caso = sorted_tp_caso[0][0]
interval_caso = (final_caso-init).days
#print(sorted_tp_caso, interval_caso, final_namecaso, final_caso)
if final_namecaso!="OBITO COVID CASO":
type_caso = False
else:
type_caso = True
# --> For control:
timeline_namecontrole = ["D1 CONTROLE", "OBITO COVID CONTROLE", "OBITO CONTROLE", "COORTE FINAL"]
timeline_controle = [events_controle["D1 CONTROLE"], events_controle["OBITO COVID CONTROLE"],
events_controle["OBITO CONTROLE"], events_controle["COORTE FINAL"]]
timeline_controle = [x if not pd.isna(x) else dt.date(2050, 1, 1) for x in timeline_controle ]
sorted_tp_controle = sorted(zip(timeline_controle, timeline_namecontrole))
final_namecontrole = sorted_tp_controle[0][1]
final_controle = sorted_tp_controle[0][0]
interval_controle = (final_controle-init).days
#print(sorted_tp_caso, interval_caso, final_namecaso, final_caso)
if final_namecontrole!="OBITO COVID CONTROLE":
type_controle = False
else:
type_controle = True
# --> Organize values
data["CPF"] += [cpf_caso, cpf_controle]
data["DATA D1"] += [init, events_pair_df[colname["D1 CONTROLE"]].iat[j]]
data["DATA D2"] += [d2_caso, d2_controle]
data["DATA OBITO COVID"] += [events_caso["OBITO COVID CASO"], events_controle["OBITO COVID CONTROLE"]]
data["DATA OBITO GERAL"] += [events_caso["OBITO CASO"], events_controle["OBITO CONTROLE"]]
data["TIPO"] += ["CASO", "CONTROLE"]
data["PAR"] += [cpf_controle, cpf_caso]
data["PAREADO"] += [True, True]
data["OBITO COVID DURACAO"] += [interval_caso, interval_controle]
data["COM DESFECHO - OBITO COVID"] += [type_caso, type_controle]
data =
completion: pd.DataFrame(data)
api: pandas.DataFrame
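# --- Appended illustrative sketch (not part of the original script) ------------
# get_intervals above selects, for each person, the earliest of several candidate
# end-of-follow-up dates, replacing missing dates with a far-future sentinel so
# they never win the sort. A minimal standalone version of that step:
def _example_earliest_event():
    events = {
        "D1 CONTROLE": dt.date(2021, 5, 1),
        "OBITO COVID CASO": np.nan,   # missing -> sentinel
        "OBITO CASO": np.nan,
        "COORTE FINAL": dt.date(2021, 8, 31),
    }
    sentinel = dt.date(2050, 1, 1)
    timeline = [(d if not pd.isna(d) else sentinel, name) for name, d in events.items()]
    final_date, final_name = sorted(timeline)[0]
    return final_name, final_date   # -> ("D1 CONTROLE", datetime.date(2021, 5, 1))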