| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
|---|---|---|
import logging
import math
import re
from collections import Counter
import numpy as np
import pandas as pd
from certa.utils import diff
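# NOTE: `cs` (used further down in find_candidates_predict) is not defined in this
# excerpt. Judging from the `math`, `re` and `Counter` imports it is presumably a
# bag-of-words cosine similarity between two strings; the helper below is a sketch
# under that assumption, not the original implementation.
_WORD_RE = re.compile(r'\w+')

def cs(text1, text2):
    # build sparse term-count vectors for both strings
    v1 = Counter(_WORD_RE.findall(str(text1).lower()))
    v2 = Counter(_WORD_RE.findall(str(text2).lower()))
    # cosine similarity between the two count vectors
    num = sum(v1[t] * v2[t] for t in (set(v1) & set(v2)))
    den = math.sqrt(sum(c * c for c in v1.values())) * math.sqrt(sum(c * c for c in v2.values()))
    return num / den if den else 0.0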
def get_original_prediction(r1, r2, predict_fn):
r1r2 = get_row(r1, r2)
return predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
def get_row(r1, r2, lprefix='ltable_', rprefix='rtable_'):
r1_df = pd.DataFrame(data=[r1.values], columns=r1.index)
r2_df = pd.DataFrame(data=[r2.values], columns=r2.index)
r1_df.columns = list(map(lambda col: lprefix + col, r1_df.columns))
r2_df.columns = list(map(lambda col: rprefix + col, r2_df.columns))
r1r2 = pd.concat([r1_df, r2_df], axis=1)
return r1r2
def support_predictions(r1: pd.Series, r2: pd.Series, lsource: pd.DataFrame,
rsource: pd.DataFrame, predict_fn, lprefix, rprefix, num_triangles: int = 100,
class_to_explain: int = None, max_predict: int = -1,
use_w: bool = True, use_q: bool = True):
'''
generate a pd.DataFrame of support predictions to be used to generate open triangles.
:param r1: the "left" record
:param r2: the "right" record
:param lsource: the "left" data source
:param rsource: the "right" data source
:param predict_fn: the ER model prediction function
:param lprefix: the prefix of attributes from the "left" table
:param rprefix: the prefix of attributes from the "right" table
:param num_triangles: number of open triangles to be used to generate the explanation
:param class_to_explain: the class to be explained
:param max_predict: the maximum number of predictions to be performed by the ER model to generate the requested
number of open triangles
:param use_w: whether to use left open triangles
:param use_q: whether to use right open triangles
:return: a pd.DataFrame of record pairs with one record from the original prediction and one record yielding an
opposite prediction by the ER model
'''
r1r2 = get_row(r1, r2)
original_prediction = predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
r1r2['id'] = "0@" + str(r1r2[lprefix + 'id'].values[0]) + "#" + "1@" + str(r1r2[rprefix + 'id'].values[0])
copies, copies_left, copies_right = expand_copies(lprefix, lsource, r1, r2, rprefix, rsource)
find_positives, support = get_support(class_to_explain, pd.concat([lsource, copies_left]), max_predict,
original_prediction, predict_fn, r1, r2, pd.concat([rsource, copies_right]),
use_w, use_q, lprefix, rprefix, num_triangles)
if len(support) > 0:
if len(support) > num_triangles:
support = support.sample(n=num_triangles)
else:
logging.warning(f'could only find {len(support)} of the {num_triangles} requested triangles')
support['label'] = list(map(lambda predictions: int(round(predictions)),
support.match_score.values))
support = support.drop(['match_score', 'nomatch_score'], axis=1)
if class_to_explain is None:
r1r2['label'] = np.argmax(original_prediction)
else:
r1r2['label'] = class_to_explain
support_pairs = pd.concat([r1r2, support], ignore_index=True)
return support_pairs, copies_left, copies_right
else:
logging.warning('no triangles found')
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
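# Example usage (a sketch, not part of the original module): `lsource`/`rsource` are the
# two tables being matched and `predict_fn` is any ER model wrapper that returns a
# DataFrame with 'match_score'/'nomatch_score' columns for a batch of record pairs.
#
#   support, copies_l, copies_r = support_predictions(
#       lsource.iloc[3], rsource.iloc[10], lsource, rsource,
#       predict_fn, 'ltable_', 'rtable_', num_triangles=100)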
def find_candidates_predict(record, source, find_positives, predict_fn, num_candidates, lj=True,
max=-1, lprefix='ltable_', rprefix='rtable_'):
if lj:
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
copy = source.copy()
records.columns = list(map(lambda col: lprefix + col, records.columns))
copy.columns = list(map(lambda col: rprefix + col, copy.columns))
records.index = copy.index
samples = pd.concat([records, copy], axis=1)
else:
copy = source.copy()
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
records.index = copy.index
copy.columns = list(map(lambda col: lprefix + col, copy.columns))
records.columns = list(map(lambda col: rprefix + col, records.columns))
samples = pd.concat([copy, records], axis=1)
if max > 0:
samples = samples.sample(frac=1)[:max]
record2text = " ".join([str(val) for k, val in record.to_dict().items() if k not in ['id']])
samples['score'] = samples.apply(lambda row: cs(record2text, " ".join(row.astype(str))), axis=1)
samples = samples.sort_values(by='score', ascending=not find_positives)
samples = samples.drop(['score'], axis=1)
result = pd.DataFrame()
batch = num_candidates * 4
splits = min(10, int(len(samples) / batch))
i = 0
while len(result) < num_candidates and i < splits:
batch_samples = samples[batch * i:batch * (i + 1)]
predicted = predict_fn(batch_samples)
if find_positives:
out = predicted[predicted["match_score"] > 0.5]
else:
out = predicted[predicted["match_score"] < 0.5]
if len(out) > 0:
result = pd.concat([result, out], axis=0)
logging.info(f'{i}:{len(out)},{len(result)}')
i += 1
return result
def generate_subsequences(lsource, rsource, max=-1):
new_records_left_df = pd.DataFrame()
for i in np.arange(len(lsource[:max])):
r = lsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_left_df) + len(lsource)))
if len(nr_df) > 0:
nr_df.columns = lsource.columns
new_records_left_df = pd.concat([new_records_left_df, nr_df])
new_records_right_df = pd.DataFrame()
for i in np.arange(len(rsource[:max])):
r = rsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_right_df) + len(rsource)))
if len(nr_df) > 0:
nr_df.columns = rsource.columns
new_records_right_df = pd.concat([new_records_right_df, nr_df])
return new_records_left_df, new_records_right_df
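# `generate_modified` is not part of this excerpt. From its use above it must return a
# collection of altered copies of `record` (same column layout as the source table) with
# fresh ids starting at `start_id`. The sketch below is only one plausible variant,
# dropping the last token of each textual attribute; it is an assumption about the real
# logic rather than a faithful reimplementation.
def generate_modified(record, start_id=0):
    modified = []
    new_id = start_id
    for col in record.index:
        if col == 'id':
            continue
        tokens = str(record[col]).split()
        if len(tokens) < 2:
            continue  # nothing meaningful to drop for this attribute
        copy = record.copy()
        copy[col] = ' '.join(tokens[:-1])  # drop the last token
        copy['id'] = new_id
        modified.append(copy)
        new_id += 1
    return modified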
def get_support(class_to_explain, lsource, max_predict, original_prediction, predict_fn, r1, r2,
rsource, use_w, use_q, lprefix, rprefix, num_triangles):
candidates4r1 = pd.DataFrame()
candidates4r2 = pd.DataFrame()
num_candidates = int(num_triangles / 2)
if class_to_explain is None:
findPositives = bool(original_prediction[0] > original_prediction[1])
else:
findPositives = bool(0 == int(class_to_explain))
if use_q:
candidates4r1 = find_candidates_predict(r1, rsource, findPositives, predict_fn, num_candidates,
lj=True, max=max_predict, lprefix=lprefix, rprefix=rprefix)
if use_w:
candidates4r2 = find_candidates_predict(r2, lsource, findPositives, predict_fn, num_candidates,
lj=False, max=max_predict, lprefix=lprefix, rprefix=rprefix)
neighborhood = pd.DataFrame()
"""
GUI code modified based on https://github.com/miili/StreamPick
For earthquake PKiKP coda quality evaluation and stack
"""
import os
import pickle
import pandas as pd
import numpy as np
# GUI import
import PyQt5
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sys
import signal
import scipy
import gpar
from gpar.util import util
from itertools import cycle
#figure plot import
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.transforms import offset_copy
from matplotlib.widgets import RectangleSelector
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import shiftgrid
# from mpl_toolkits.axesgrid1 import make_axes_locatable
#obspy import
from obspy.taup import TauPyModel
import obspy
from obspy.core.trace import Trace
from obspy.core.stream import Stream
from obspy.core import read
from obspy.core import AttribDict
signal.signal(signal.SIGINT, signal.SIG_DFL)
# color = list(mcolors.cnames.values())
color = ['red', 'blue', 'green','yellow','cyan','magenta','purple']
#class for event first evaluation
class glanceEQ(QtWidgets.QMainWindow):
def __init__(self, array=None, parent=None, ap=None):
if ap is None:
self.qApp = QtWidgets.QApplication(sys.argv)
else:
self.qApp = ap
self.KeepGoing = False
if isinstance(array, str):
ar = util.loadArray(array)
elif isinstance(array, gpar.arrayProcess.Array):
ar = array
else:
msg = 'Define an Array instance (gpar.arrayProcess.Array()) or a path to a pickle file'
raise ValueError(msg)
self.array = ar
self.eve_type = ['A','B','C','D']
self._shortcuts = {'eve_next': 'n',
'eve_prev': 'p',
'trim_apply': 'w',
'gain_up': 'u',
'gain_down': 'd',
'strip': 's',
'A':'a',
'B':'b',
'C':'c',
'D':'d'}
self._plt_drag = None
# init events in the array
self._events = ar.events #defines list self._events
self.savefile = None
self._initEqList()
self._stripDF = pd.DataFrame()
self._badDF = pd.DataFrame()
self._btype = 'beam'
self._method = 'all'
self.trinWin = [{'name':'N200-C200','noise':200.0,'coda':200.0,
'stime':400.0,'etime':1800,
'smooth':4.0,'model':'ak135'}]
self._current_win = None
self._current_strip = False
self._eventCycle = cycle(self._eqlist)
self._eventInfo(next(self._eventCycle))
QMainWindow.__init__(self)
self.setupUI()
def setupUI(self):
self.main_widget = QtWidgets.QWidget(self)
self._initMenu()
self._createStatusBar()
self._initPlots()
l = QVBoxLayout(self.main_widget)
l.addLayout(self.btnbar)
l.addLayout(self.btnbar2)
l.addWidget(self.canvas)
self.setCentralWidget(self.main_widget)
self.setGeometry(300, 300, 1200, 800)
self.setWindowTitle('Array Analysis: %s'%self.array.name)
self.show()
def _killLayout(self):
pass
def _initEqList(self):
self._eqlist = []
for _eve in self._events:
self._eqlist.append(_eve.ID)
self._eqlist.sort()
def _initPlots(self):
self.fig = Figure(facecolor='.86',dpi=100, frameon=True)
self.canvas = FigureCanvas(self.fig)
self.canvas.setFocusPolicy(PyQt5.QtCore.Qt.StrongFocus)
self._drawFig()
# connect the events
self.fig.canvas.mpl_connect('scroll_event', self._pltOnScroll)
self.fig.canvas.mpl_connect('motion_notify_event', self._pltOnDrag)
self.fig.canvas.mpl_connect('button_release_event', self._pltOnButtonRelease)
def _initMenu(self):
# Next and Prev Earthquake
nxt = QtWidgets.QPushButton('Next >>',
shortcut=self._shortcuts['eve_next'], parent=self.main_widget)
nxt.clicked.connect(self._pltNextEvent)
nxt.setToolTip('shortcut <b>n</b>')
nxt.setMaximumWidth(150)
prv = QPushButton('Prev >>',
shortcut=self._shortcuts['eve_prev'], parent=self.main_widget)
prv.clicked.connect(self._pltPrevEvent)
prv.setToolTip('shortcut <b>p</b>')
prv.setMaximumWidth(150)
# Earthquake drop-down
self.evecb = QComboBox(self)
for eve in self._eqlist:
self.evecb.addItem(eve)
self.evecb.activated.connect(self._pltEvent)
self.evecb.setMaximumWidth(1000)
self.evecb.setMinimumWidth(80)
# coda strip button
self.codabtn = QtWidgets.QPushButton('Strip',
shortcut=self._shortcuts['strip'],parent=self.main_widget)
self.codabtn.setToolTip('shortcut <b>s</b>')
self.codabtn.clicked.connect(self._appStrip)
self.codacb = QComboBox(self)
for med in ['all', 'coda','twoline']:
self.codacb.addItem(med)
self.codacb.activated.connect(self._selectMethod)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.wincb = QComboBox(self)
self.wincb.activated.connect(self._changeStrip)
self._updateWindow()
# edit/delete coda selected window
winEdit = QtWidgets.QPushButton('Coda Window')
winEdit.resize(winEdit.sizeHint())
winEdit.clicked.connect(self._editTimeWindow)
winDelt = QtWidgets.QPushButton('Delete')
winDelt.resize(winDelt.sizeHint())
winDelt.clicked.connect(self._deleteWin)
# Coda level
_radbtn = []
for _o in self.eve_type:
_radbtn.append(QRadioButton(_o.upper(), shortcut=self._shortcuts[_o.upper()]))
_radbtn[-1].setToolTip('Level: '+_o)
self.levelGrp = QButtonGroup()
self.levelGrp.setExclusive(True)
levelbtn = QHBoxLayout()
for _i, _btn in enumerate(_radbtn):
self.levelGrp.addButton(_btn, _i)
levelbtn.addWidget(_btn)
# plot slide beam figure button
self.sbcb = QComboBox(self)
for btype in ['beam', 'slide', 'vespetrum','strip']:
self.sbcb.addItem(btype)
self.sbcb.activated.connect(self._updatePlot)
self.vepcb = QComboBox(self)
for scale in ['log10', 'log','sqrt','beam']:
self.vepcb.addItem(scale)
self.vepcb.activated.connect(self._updatePlot )
self.vepcb.setEnabled(False)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.ampmin = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=1)
self.ampmax = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=3)
self.ampmin.valueChanged.connect(self._updatePlot)
self.ampmax.valueChanged.connect(self._updatePlot)
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
# self._initAmp()
self.sbcb.activated.connect(self._activeAmp)
self.ttbtn = QtWidgets.QPushButton('Phases', parent=self.main_widget)
self.ttbtn.setCheckable(True)
self.ttbtn.clicked.connect(self._updatePlot)
# Arrange buttons
vline = QFrame()
vline.setFrameStyle(QFrame.VLine | QFrame.Raised)
self.btnbar = QHBoxLayout()
self.btnbar.addWidget(prv)
self.btnbar.addWidget(nxt)
self.btnbar.addWidget(QLabel('Event'))
self.btnbar.addWidget(self.evecb)
##
self.btnbar.addWidget(vline)
self.btnbar.addWidget(self.codabtn)
self.btnbar.addWidget(self.codacb)
self.btnbar.addWidget(self.wincb)
self.btnbar.addWidget(winEdit)
self.btnbar.addWidget(winDelt)
self.btnbar.addStretch(1)
self.btnbar2 = QHBoxLayout()
self.btnbar2.addWidget(QLabel('Level: '))
self.btnbar2.addLayout(levelbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('TYPE'))
self.btnbar2.addWidget(self.sbcb)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('Scale'))
self.btnbar2.addWidget(self.vepcb)
self.btnbar2.addWidget(QLabel('AMP'))
self.btnbar2.addWidget(self.ampmin)
self.btnbar2.addWidget(self.ampmax)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.ttbtn)
self.btnbar2.addStretch(1)
#Menubar
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save', self._saveFile)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save as', self._saveFileFormat)
fileMenu.addSeparator()
fileMenu.addAction(QIcon().fromTheme('document-open'),
'Load array', self._openArray)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-open'),
'Load Strip Pickle File', self._openFile)
fileMenu.addSeparator()
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save Plot', self._savePlot)
fileMenu.addSeparator()
quit = QAction(QIcon().fromTheme('application-exit')," &Exit", self)
fileMenu.addAction(quit)
fileMenu.triggered[QAction].connect(self.closeArray)
def _hardExist(self):
self.deleteLater()
def _activeAmp(self):
if self.sbcb.currentText() == 'vespetrum':
self.ampmin.setEnabled(True)
self.ampmax.setEnabled(True)
self.vepcb.setEnabled(True)
if self.vepcb.currentText() == 'beam':
self.ampmax.setMaximum(100000)
# self.ampmax.setValue(1000)
self.ampmax.setSingleStep(500)
# self.ampmin.setValue(10)
self.ampmin.setMaximum(100000)
self.ampmin.setSingleStep(500)
elif self.vepcb.currentText() == 'sqrt':
self.ampmax.setMaximum(300)
# self.ampmax.setValue(30)
self.ampmax.setSingleStep(5)
# self.ampmin.setValue(3)
self.ampmin.setMaximum(300)
self.ampmin.setSingleStep(5)
elif self.vepcb.currentText() == 'log':
self.ampmax.setMaximum(12)
# # self.ampmax.setValue(7)
self.ampmax.setSingleStep(1)
# # self.ampmin.setValue(2)
self.ampmin.setMaximum(12)
self.ampmin.setSingleStep(1)
elif self.vepcb.currentText() == 'log10':
self.ampmax.setSingleStep(0.5)
self.ampmin.setSingleStep(0.5)
self.ampmax.setMaximum(5)
self.ampmin.setMaximum(5)
else:
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
self.vepcb.setEnabled(False)
def _createStatusBar(self):
"""
Creates the status bar
"""
sb =QStatusBar()
sb.setFixedHeight(18)
self.setStatusBar(sb)
self.statusBar().showMessage('Ready')
def _selectMethod(self, index):
self._method = self.codacb.currentText()
self.sbcb.setCurrentIndex(3)
self._updatePlot()
def _changeStrip(self,index):
if index == len(self.trinWin):
return self._newTrim()
else:
return self._appStrip()
def _newTrim(self):
"""
Create a new strip window
"""
newWin = self.defWindow(self)
if newWin.exec_():
self.trinWin.append(newWin.getValues())
self._updateWindow()
self.wincb.setCurrentIndex(len(self.trinWin)-1)
self._appStrip()
def _editTimeWindow(self):
"""
Edit existing coda selection window
"""
_i = self.wincb.currentIndex()
this_window = self.trinWin[_i]
editWindow = self.defWindow(self, this_window)
if editWindow.exec_():
self.trinWin[_i] = editWindow.getValues()
self._updateWindow()
self.wincb.setCurrentIndex(_i)
self._appStrip()
def _deleteWin(self):
"""
Delete window
"""
pass
_i = self.wincb.currentIndex()
def _updateWindow(self):
self.wincb.clear()
self.wincb.setCurrentIndex(-1)
for _i, _f in enumerate(self.trinWin):
self.wincb.addItem('Noise %.2f sec - Coda %.2f sec' %(_f['noise'], _f['coda']))
self.wincb.addItem('Create new Window')
def _appStrip(self, button=True, draw=True):
"""
Apply coda strip
"""
_method = self.codacb.currentText()
_j = self.wincb.currentIndex()
self._eventInfo(self._current_id)
self._current_strip = True
spts = int(self.trinWin[_j]['smooth'] / self._current_delta )
codaStrip(self._current_event, method=_method, window=spts,
siglen=self.trinWin[_j]['coda'], noise=self.trinWin[_j]['noise'],beamphase=self.beamphase,
model=self.trinWin[_j]['model'], stime=self.trinWin[_j]['stime'], etime=self.trinWin[_j]['etime'],)
self._btype = 'strip'
self.sbcb.setCurrentIndex(3)
self._setCodaStrip()
self._updatePlot()
def _pltEvent(self):
"""
Plot event from DropDown Menu
"""
_i = self.evecb.currentIndex()
while next(self._eventCycle) != self._eqlist[_i]:
pass
self._eventInfo(self._eqlist[_i])
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip=True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
self._drawFig()
def _pltPrevEvent(self):
"""
Plot previous events
"""
_j = self.evecb.currentIndex()
for _i in range(len(self._eqlist) - 1):
prevEvent = next(self._eventCycle)
self._eventInfo(prevEvent)
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
if _j == 0:
_n = len(self.evecb) - 1
self.evecb.setCurrentIndex(_n)
else:
self.evecb.setCurrentIndex(_j-1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _pltNextEvent(self):
_id = self._current_event.ID
level = self.eve_type[self.levelGrp.checkedId()]
if level == 'D':
self._current_strip = True
self._setCodaStrip()
else:
# if len(self._stripDF) != 0:
# existDF = self._stripDF[(self._stripDF.ID == _id)]
# else:
# existDF = pd.DataFrame()
# if len(existDF) == 0:
if not self._current_strip:
choice = QMessageBox.question(self, 'Stripping?',
"Haven't stripping yet, want to do it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
self._current_strip = True
self._appStrip()
return
self._eventInfo(next(self._eventCycle))
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
_i = self.evecb.currentIndex()
if _i == len(self.evecb) - 1:
self.evecb.setCurrentIndex(0)
else:
self.evecb.setCurrentIndex(_i+1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _eventInfo(self, eqid):
"""
Copies the array process result from the current Earthquake object
"""
for eve in self._events:
if eve.ID == eqid:
event = eve
self._current_event = event
self.beamphase = event.beamphase
self._current_id = eqid
if not hasattr(event, 'beam'):
return
self._current_beam = event.beam
filts = {}
for tr in self._current_beam:
filts[tr.stats.station] = tr.stats.channel
self._current_filts = filts
self._current_ID = event.ID
self._current_dis = event.dis
self._current_p = event.rayp
self._current_bb = event.bb
self._current_bakAz = event.baz
self._current_delta = event.delta
if hasattr(event, 'slideSt'):
self._current_slide = event.slideSt
if hasattr(event, 'energy'):
self._current_energy = event.energy
self._current_time = event.slantTime
self._current_K = event.slantK
self._current_type = event.slantType
def _setCodaStrip(self):
if not self._current_strip:
return
event = self._current_event
_i = self.wincb.currentIndex()
win = self.trinWin[_i]
if len(self._stripDF) != 0:
existDF = self._stripDF[(self._stripDF.ID == self._current_event.ID) & (self._stripDF.winName == win['name'])]
else:
existDF = pd.DataFrame()
if len(self._badDF) !=0:
_badDF = self._badDF[self._badDF.ID == self._current_event.ID]
else:
_badDF = pd.DataFrame()
if len(existDF) !=0:
choice = QMessageBox.question(self, 'Replace stripping',
"Do you want to replace existed stripping?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = existDF.index
self._stripDF.drop(index,axis=0,inplace=True)
self._stripDF.reset_index(inplace=True, drop=True)
else:
return
if len(_badDF) != 0:
choice = QMessageBox.question(self, 'Bad Event',
"Want to replace it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = _badDF.index
self._badDF.drop(index,axis=0,inplace=True)
self._badDF.reset_index(inplace=True, drop=True)
else:
return
level = self.eve_type[self.levelGrp.checkedId()]
ID = event.ID
lat = event.lat
lon = event.lon
dep = event.dep
mw = event.mw
dis = event.dis
bb = event.bb
bakAzi = event.baz
delta = event.delta
if level =='D':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,'Level':'D'}
msg = ('%s is a bad event' % self._current_ID)
gpar.log(__name__, msg, level='info', pri=True)
self._badDF = self._badDF.append(newRow, ignore_index=True)
else:
if self._method == 'all':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,
'winName':win['name'], 'win':win,
'Level':level, 'delta': delta,
'codaResSt':event.codaResSt,
'codaSt':event.codaSt,
'crms':event.codaMod,
'twoResSt':event.twoResSt,
'twoSt':event.twoSt,
'trms':event.twoMod}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
elif self._method == 'coda':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'winName':win['name'], 'win':win,
'BB':bb,'bakAzi':bakAzi,
'Level':level, 'delta': delta,
'codaResSt':event.codaResSt,
'codaSt':event.codaSt,
'crms':event.codaMod,
}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
elif self._method == 'twoline':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,
'Level':level, 'delta': delta,
'winName':win['name'], 'win':win,
'twoResSt':event.twoResSt,
'twoSt':event.twoSt,
'trms':event.twoMod}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
def _drawFig(self):
self.fig.clear()
a = u"\u00b0"
if self._btype == 'beam':
num_plots = len(self._current_beam)
for _i, tr in enumerate(self._current_beam):
ax = self.fig.add_subplot(num_plots, 1, _i+1)
if hasattr(tr.stats, 'channel'):
label = tr.stats.channel
else:
label=None
time = np.arange(tr.stats.npts) * tr.stats.delta + tr.stats.sac.b
ax.plot(time, tr.data, 'k', label=label)
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
arrival = self._current_event.arrivals[self.beamphase]['TT']# - self._current_event.time
ax.vlines(arrival, ax.get_ylim()[0],ax.get_ylim()[1],'r', label=self.beamphase)
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for name, tt in _arr.items():
if name == self.beamphase:
continue
ax.vlines(tt['TT'], ax.get_ylim()[0],ax.get_ylim()[1],'b',label=name)
ax.legend()
if _i == 0:
ax.set_xlabel('Seconds')
self.fig.suptitle('%s - %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._btype, self._current_event.dep, self._current_event.dis, a))
elif self._btype == 'slide':
self.fig.suptitle('%s - %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._btype, self._current_event.dep, self._current_event.dis, a))
nfilts = len(self._current_slide.keys())
ax = self.fig.subplots(4, nfilts, sharex='col', sharey='row')
ax = ax.reshape(4,nfilts)
for ind, (name,st) in enumerate(self._current_slide.items()):
for _i, tr in enumerate(st):
if hasattr(tr.stats, 'channel'):
label = tr.stats.channel
else:
label=None
time = np.arange(tr.stats.npts) * tr.stats.delta + tr.stats.sac.b
ax[_i,ind].plot(time, tr.data, 'k', label=None)
ax[_i, ind].set_xlim([np.min(time), np.max(time)])
if label == 'Amplitude':
peak = np.max(tr.data) + 1
ax[_i,ind].set_ylim([-1, peak])
elif label == 'Slowness':
ax[_i,ind].set_ylim([0, 15])
rp = self._current_event.rayp
ax[_i, ind].hlines(rp, np.min(time), np.max(time), 'r', 'dashed')
elif label == 'Back Azimuth':
ax[_i, ind].set_ylim([0,360])
elif label == 'coherence':
ax[_i, ind].set_ylim([0,1])
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
arrival = self._current_event.arrivals[self.beamphase]['TT']# - self._current_event.time
ax[_i,ind].vlines(arrival, ax[_i,ind].get_ylim()[0],ax[_i,ind].get_ylim()[1],'r',label=self.beamphase)
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for pname, tt in _arr.items():
if pname == self.beamphase:
continue
ax[_i,ind].vlines(tt['TT'], ax[_i,ind].get_ylim()[0],ax[_i,ind].get_ylim()[1],'b',label=pname)
ax[_i,ind].legend()
# ax[_i,ind].set_aspect(aspect=0.3)
if _i == 3:
ax[_i,ind].set_xlabel('Seconds')
if _i == 0:
ax[_i,ind].set_title(name)
if ind == 0:
ax[_i,ind].set_ylabel(label)
elif self._btype == 'vespetrum':
num = len(self._current_energy)
extent=[np.min(self._current_time),np.max(self._current_time),np.min(self._current_K),np.max(self._current_K)]
vmin = float(self.ampmin.cleanText())
vmax = float(self.ampmax.cleanText())
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
for ind, _row in self._current_energy.iterrows():
# abspow = _row.POWER
name = _row.FILT
if self.vepcb.currentText() == 'log10':
abspow = np.log10(np.abs(_row.POWER))
elif self.vepcb.currentText() == 'log':
abspow = np.log(np.abs(_row.POWER))
elif self.vepcb.currentText() == 'sqrt':
abspow = np.sqrt(np.abs(_row.POWER))
else:
abspow = np.abs(_row.POWER)
ax = self.fig.add_subplot(1, num, ind+1)
ax.imshow(abspow, extent=extent, aspect='auto', cmap='Reds', vmin=vmin, vmax=vmax, origin='lower')
arrival = self._current_event.arrivals[self.beamphase]['TT']
ax.vlines(arrival, ax.get_ylim()[0],ax.get_ylim()[1],'k',label=self.beamphase)
rp = self._current_event.rayp
ax.hlines(rp, ax.get_xlim()[0],ax.get_xlim()[1], 'b')
ax.hlines(-rp, ax.get_xlim()[0],ax.get_xlim()[1], 'b')
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for name, tt in _arr.items():
if name == self.beamphase:
continue
ax.vlines(tt['TT'], ax.get_ylim()[0],ax.get_ylim()[1],'b',label=name)
ax.legend()
ax.set_xlabel('Seconds')
if ind == 0:
ax.set_ylabel(self._current_type)
ax.set_title(name)
if self._current_type == 'slowness':
title = '%s - %s\nSlant Stack at a Backazimuth of %.1f %sN\nDep:%s Distance: %s%s' \
%(self._btype, self._current_ID, self._current_event.baz,a,
self._current_event.dep, self._current_event.dis, a)
elif self._current_type == 'theta':
title = '%s - %s\nSlant Stack at a slowness of %.2f s/deg\nDep:%s Distance: %s%s' \
%(self._btype, self._current_ID, self._current_event.rayp,
self._current_event.dep, self._current_event.dis, a)
self.fig.suptitle(title)
elif self._btype == 'strip':
_i = self.wincb.currentIndex()
win = self.trinWin[_i]
if len(self._stripDF) != 0:
existDF = self._stripDF[(self._stripDF.ID == self._current_event.ID) & (self._stripDF.winName == win['name'])]
else:
existDF = pd.DataFrame()
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == self._current_event.ID]
else:
_badDF = pd.DataFrame()
if len(existDF) == 0 and len(_badDF) == 0:
choice = QMessageBox.question(self, 'Stripping?',
"Haven't stripping yet, want to do it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
self._appStrip()
else:
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._updatePlot()
elif len(_badDF) != 0:
choice = QMessageBox.question(self, 'Bad event!',
"Want to reevalua it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = _badDF.index
self._badDF.drop(index,axis=0,inplace=True)
self._badDF.reset_index(inplace=True, drop=True)
self.sbcb.setCurrentIndex(0)
self._updatePlot()
else:
self.sbcb.setCurrentIndex(0)
self._updatePlot()
elif len(existDF) != 0:
trinwin = existDF.win.iloc[0]
stime = trinwin['stime']
etime = trinwin['etime']
delta = self._current_beam[0].stats.delta
# npts = int((etime - stime)/delta) + 1
npts = int((etime - stime)/delta)
# time = np.linspace(stime, etime, npts)
time = stime + np.arange(npts) * delta
sind = int(stime / delta)
# eind = int(etime / delta)
if self._method == 'all':
codamode = existDF.crms.iloc[0]
twomode = existDF.trms.iloc[0]
nfilter = len(codamode)
codaSt = existDF.codaSt.iloc[0]
twoSt = existDF.twoSt.iloc[0]
cRes = existDF.codaResSt.iloc[0]
tRes = existDF.twoResSt.iloc[0]
timeR = np.arange(cRes[0].stats.npts)*cRes[0].stats.delta - trinwin['noise']
data_time = np.arange(twoSt[0].stats.npts) * delta + (twoSt[0].stats.starttime - self._current_beam[0].stats.starttime)
ax = self.fig.subplots(2, nfilter)
if nfilter == 1:
ax = ax.reshape(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_coda = codaSt[ind].data
time_coda = stime + np.arange(len(data_coda)) * delta
ax[0,ind].plot(time_coda,np.log10(data_coda),'r', label='coda')
data_two = twoSt[ind].data
ax[0,ind].plot(data_time, data_two,'b', label='twoline')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_c = "Coda: Mean RMS = %s"%(codamode['RMS'].iloc[ind])
label_t = "Twoline: Mean RMS = %s"%(twomode['RMS'].iloc[ind])
ax[1,ind].plot(timeR,cRes[ind].data, 'r', label=label_c)
ax[1,ind].plot(timeR, tRes[ind].data, 'b',label=label_t)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%twomode['FILT'].iloc[ind])
if ind == 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
elif self._method == 'coda':
codamode = existDF.crms.iloc[0]
nfilter = len(codamode)
codaSt = existDF.codaSt.iloc[0]
cRes = existDF.codaResSt.iloc[0]
timeR = np.arange(cRes[0].stats.npts)*cRes[0].stats.delta - trinwin['noise']
ax = self.fig.subplots(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_coda = codaSt[ind].data
time_coda = stime + np.arange(len(data_coda)) * delta
ax[0,ind].plot(time_coda,np.log10(data_coda),'r', label='coda')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_c = "Coda: Mean RMS = %s"%(codamode['RMS'].iloc[ind])
ax[1,ind].plot(timeR,cRes[ind].data, 'r', label=label_c)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%codamode['FILT'].iloc[ind])
if ind == 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
elif self._method == 'twoline':
twomode = existDF.trms.iloc[0]
nfilter = len(twomode)
twoSt = existDF.twoSt.iloc[0]
tRes = existDF.twoResSt.iloc[0]
timeR = np.arange(tRes[0].stats.npts)*tRes[0].stats.delta - trinwin['noise']
data_time = np.arange(twoSt[0].stats.npts) * delta + (twoSt[0].stats.starttime - self._current_beam[0].stats.starttime)
ax = self.fig.subplots(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_two = twoSt[ind].data
ax[0,ind].plot(data_time, data_two,'b', label='twoline')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_t = "Twoline: Mean RMS = %s"%(twomode['RMS'].iloc[ind])
ax[1,ind].plot(timeR, tRes[ind].data, 'b',label=label_t)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%twomode['FILT'].iloc[ind])
if ind == 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
self.fig.suptitle('Coda Strip for %s using %s method in win %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._method, trinwin['name'],
self._current_event.dep, self._current_event.dis, a))
self._canvasDraw()
#def _plotTT(self):
# if self.ttbtn.isChecked() is False:
def _updatePlot(self):
self._activeAmp()
self._btype = self.sbcb.currentText()
self._drawFig()
def _canvasDraw(self):
"""
Redraws the canvas and re-set mouse focus
"""
# if isinstance(st, obspy.core.stream.Stream):
# delta = st[0].stats.delta
# elif isinstance(st, obspy.core.trace.Trace):
# delta = st.stats.delta
for _i, _ax in enumerate(self.fig.get_axes()):
_ax.set_xticklabels(_ax.get_xticks())
self.fig.canvas.draw()
self.canvas.setFocus()
def _pltOnScroll(self, event):
"""
Scrolls/Redraws the plot along x axis
"""
if event.inaxes is None:
return
if event.key == 'control':
axes = [event.inaxes]
else:
axes = self.fig.get_axes()
for _ax in axes:
left = _ax.get_xlim()[0]
right = _ax.get_xlim()[1]
extent_x = right - left
dxzoom = .2 * extent_x
aspect_left = (event.xdata - _ax.get_xlim()[0]) / extent_x
aspect_right = (_ax.get_xlim()[1] - event.xdata) / extent_x
up = _ax.get_ylim()[1]
down = _ax.get_ylim()[0]
extent_y = up - down
dyzoom = 0.5 * extent_y
aspect_down = (0 - _ax.get_ylim()[0]) / extent_y
aspect_up = _ax.get_ylim()[1] / extent_y
if event.button == 'up':
left += dxzoom * aspect_left
right -= dxzoom * aspect_right
down += dyzoom * aspect_down
up -= dyzoom * aspect_up
elif event.button == 'down':
left -= dxzoom * aspect_left
right += dxzoom * aspect_right
down -= dyzoom * aspect_down
up += dyzoom * aspect_up
else:
return
_ax.set_xlim([left, right])
_ax.set_ylim([down, up])
self._canvasDraw()
def _pltOnDrag(self, event):
"""
Drags/redraws the plot upon drag
"""
if event.inaxes is None:
return
if event.key == 'control':
axes = [event.inaxes]
else:
axes = self.fig.get_axes()
if event.button == 1:
if self._plt_drag is None:
self._plt_drag = event.xdata
return
for _ax in axes:
_ax.set_xlim([_ax.get_xlim()[0] +
(self._plt_drag - event.xdata), _ax.get_xlim()[1] + (self._plt_drag - event.xdata)])
else:
return
self._canvasDraw()
def _pltOnButtonRelease(self, event):
"""
On Button Release Reset drag variable
"""
self._plt_drag = None
# def _pltOnButtonPress(self, event):
# """
# This function is using for zoom in relative phase region
# """
def _saveFile(self):
if self.savefile is None:
return self._saveFileFormat()
savefile = str(self.savefile)
if os.path.splitext(savefile)[1].lower() == '.pkl':
self._savePickle(savefile)
elif os.path.splitext(savefile)[1].lower() == '.csv':
self._saveCSV(savefile)
def _saveFileFormat(self):
files_types = "Pickle (*.pkl);; CSV (*.csv)"
self.savefile,_ = QFileDialog.getSaveFileName(self,
'Save as', os.getcwd(), files_types)
self.savefile = str(self.savefile)
if os.path.splitext(self.savefile)[1].lower() == '.pkl':
self._savePickle(self.savefile)
elif os.path.splitext(self.savefile)[1].lower() == '.csv':
self._saveCSV(self.savefile)
def _savePickle(self, filename):
self._stripDF.to_pickle(filename)
name = os.path.splitext(filename)
badname = name[0]+'.D'+name[1]
if len(self._badDF) != 0:
self._badDF.to_pickle(badname)
def _saveCSV(self, filename):
_stripDF = self._stripDF
_stripDF = _stripDF.drop(columns=['codaSt', 'twoSt', 'twoResSt', 'codaResSt'], errors='ignore')
_stripDF.to_csv(filename,index=False,sep=',')
if len(self._badDF) != 0:
_badDF = self._badDF
name = os.path.splitext(filename)
badname = name[0] +'.D' +name[1]
_badDF.to_csv(badname, index=False, sep=',')
def _openFile(self):
filename,_ = QFileDialog.getOpenFileName(self,'Load Pickle File',
os.getcwd(), 'Pickle Format (*.pkl)', '20')
if filename:
filename = str(filename)
self._stripDF = pd.read_pickle(filename)
name = os.path.splitext(filename)
badname = name[0]+'.D'+name[1]
if os.path.exists(badname):
self._badDF = pd.read_pickle(badname)
self._pltEvent()
self.savefile = str(filename)
def _openArray(self):
filename,_ = QFileDialog.getOpenFileName(self, 'Load array',
os.getcwd(), 'Pickle Format (*.pkl)', '20')
if filename:
filename = str(filename)
ar = util.loadArray(filename)
self._refreshArray(ar)
def _refreshArray(self, ar):
self.array = ar
self._plt_drag = None
# init events in the array
self._events = ar.events #defines list self._events
self.savefile = ar.name+'.strip.pkl'
self._initEqList()
self._stripDF = pd.DataFrame()
self._badDF = pd.DataFrame()
self._btype = 'beam'
self._method = 'all'
self.trinWin = [{'name':'N200-C200','noise':200.0,'coda':200.0,
'stime':400.0,'etime':1800,'model':'ak135'}]
self._current_win = None
self._current_strip = False
self._eventCycle = cycle(self._eqlist)
self._eventInfo(next(self._eventCycle))
self.setWindowTitle('Array Analysis: %s'%self.array.name)
self.evecb.clear()
for eve in self._eqlist:
self.evecb.addItem(eve)
self._drawFig()
def _savePlot(self):
# path = os.getcwd()
# path = os.path.join(path,self.array.name,self._current_event.ID)
file_types = "Image Format (*.png *.pdf *.ps *.eps);; ALL (*)"
filename,_ = QFileDialog.getSaveFileName(self, 'Save Plot',
os.getcwd(), file_types)
if not filename:
return
filename = str(filename)
formats = os.path.splitext(filename)[1][1:].lower()
if formats not in ['png', 'pdf', 'ps', 'eps']:
formats = 'png'
filename += '.' +formats
self.fig.savefig(filename)
def closeArray(self,event):
if len(self._stripDF) > 0 and self.savefile is None:
ask = QMessageBox.question(self, 'Save stripping?',
'Do you want to save your coda data?',
QMessageBox.Save |
QMessageBox.Discard |
QMessageBox.Cancel, QMessageBox.Save)
if ask == QMessageBox.Save:
self._saveFileFormat()
self.close()
elif ask == QMessageBox.Cancel:
event.ignore()
class defWindow(QDialog):
def __init__(self, parent=None, windowvalue=None):
"""
Coda strip window dialog
"""
QDialog.__init__(self, parent)
self.setWindowTitle('Create new coda strip window')
self.noisewin = QDoubleSpinBox(decimals=1, maximum=400, minimum=20, singleStep=10, value=10)
self.codawin = QDoubleSpinBox(decimals=1, maximum=400, minimum=20, singleStep=10, value=10)
self.stime = QDoubleSpinBox(decimals=1, maximum=600, minimum=0, singleStep=50, value=50)
self.etime = QDoubleSpinBox(decimals=1, maximum=2400, minimum=1600, singleStep=50, value=50)
self.smooth = QDoubleSpinBox(decimals=1, maximum=20, minimum=1, singleStep=1, value=4)
self.winName = QLineEdit('Window Name')
self.winName.selectAll()
self.model = QLineEdit('ak135')
self.model.selectAll()
grid = QGridLayout()
grid.addWidget(QLabel('Window Name'), 0, 0)
grid.addWidget(self.winName, 0, 1)
grid.addWidget(QLabel('Noise Win.'), 1, 0)
grid.addWidget(self.noisewin, 1, 1)
grid.addWidget(QLabel('Coda Win.'), 2, 0)
grid.addWidget(self.codawin, 2, 1)
grid.addWidget(QLabel('Start Time.'), 3, 0)
grid.addWidget(self.stime, 3, 1)
grid.addWidget(QLabel('End Time.'), 4, 0)
grid.addWidget(self.etime, 4, 1)
grid.addWidget(QLabel('Smooth.'), 5, 0)
grid.addWidget(self.smooth, 5, 1)
grid.addWidget(QLabel('Model.'), 6, 0)
grid.addWidget(self.model, 6, 1)
grid.setVerticalSpacing(10)
btnbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
btnbox.accepted.connect(self.accept)
btnbox.rejected.connect(self.reject)
layout = QVBoxLayout()
layout.addWidget(QLabel('Define noise window and coda window for stripping'))
layout.addLayout(grid)
layout.addWidget(btnbox)
if windowvalue is not None:
self.winName.setText(windowvalue['name'])
self.noisewin.setValue(windowvalue['noise'])
self.codawin.setValue(windowvalue['coda'])
self.stime.setValue(windowvalue['stime'])
self.etime.setValue(windowvalue['etime'])
self.smooth.setValue(windowvalue['smooth'])
self.model.setText(windowvalue['model'])
self.setLayout(layout)
self.setSizeGripEnabled(False)
def getValues(self):
"""
Return window dialog values as a dictionary
"""
return dict(name=str(self.winName.text()),
noise=float(self.noisewin.cleanText()),
coda=float(self.codawin.cleanText()),
stime=float(self.stime.cleanText()),
etime=float(self.etime.cleanText()),
smooth=float(self.smooth.cleanText()),
model=str(self.model.text()))
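# Example launch of the evaluation GUI (a sketch; 'ILAR.pkl' is an illustrative path to a
# pickled gpar Array, not a file shipped with this code):
#
#   app = QtWidgets.QApplication(sys.argv)
#   window = glanceEQ('ILAR.pkl', ap=app)
#   sys.exit(app.exec_())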
# class for event stacking in arrays
class stackArray(QtWidgets.QMainWindow):
def __init__(self, arraylist=None, parent=None, ap=None):
if ap is None:
self.qApp = QApplication(sys.argv)
else:
self.qApp = ap
if isinstance(arraylist, str):
arlist = pd.read_csv(arraylist, delimiter=r'\s+')
elif isinstance(arraylist, pd.DataFrame):
arlist = arraylist
else:
msg = 'Define array list in DataFrame or a path to a csv file'
raise ValueError(msg)
self._shortcuts = {'arr_next': 'n',
'arr_prev': 'p',
'cancel': 'c',
'accept': 'a',
'stack': 's'}
self._list = arlist
self._initArrayList()
self._arrayCycle = cycle(self._namelist)
self.dis = [{'name':'All-Dis',
'mindis': 50.0,
'maxdis': 75.0,
'step':25.0,
'overlap':0,
'write':False}]
self._arrayInfo(next(self._arrayCycle))
self._initReg()
QMainWindow.__init__(self)
self.setupUI()
def setupUI(self):
self.main_widget = QWidget(self)
self._initMenu()
self._createStatusBar()
self._initPlots()
l = QVBoxLayout(self.main_widget)
l.addLayout(self.btnbar)
l.addLayout(self.btnbar2)
l.addWidget(self.canvas)
self.setCentralWidget(self.main_widget)
self.setGeometry(300, 300, 1200, 800)
self.setWindowTitle('Array Stack')
self.show()
def _killLayout(self):
pass
def _initPlots(self):
self.fig = Figure(dpi=100, constrained_layout=True)
self.canvas = FigureCanvas(self.fig)
self.canvas.setFocusPolicy(PyQt5.QtCore.Qt.StrongFocus)
self._drawFig()
self.fig.canvas.mpl_connect('key_press_event', self._selectRegOnPress)
def _initMenu(self):
# Next and Prev array
nxt = QtWidgets.QPushButton('Next >>',
shortcut=self._shortcuts['arr_next'], parent=self.main_widget)
nxt.clicked.connect(self._pltNextArray)
nxt.setToolTip('shortcut <b>n</b>')
nxt.setMaximumWidth(150)
prv = QtWidgets.QPushButton('Prev >>',
shortcut=self._shortcuts['arr_prev'], parent=self.main_widget)
prv.clicked.connect(self._pltPrevArray)
prv.setToolTip('shortcut <b>p</b>')
prv.setMaximumWidth(150)
# Array drop-down
self.arcb = QComboBox(self)
for arr in self._namelist:
self.arcb.addItem(arr)
self.arcb.activated.connect(self._pltArray)
self.arcb.setMaximumWidth(1000)
self.arcb.setMinimumWidth(80)
# filter selection
self.filtcb = QComboBox(self)
self.filtcb.addItem('all')
for filt in self._current_filter:
self.filtcb.addItem(filt)
self.filtcb.activated.connect(self._drawStack)
self.filtcb.setMaximumWidth(1000)
self.filtcb.setMinimumWidth(80)
# Select region
# Stacking earthquakes in array
self.stbtn = QtWidgets.QPushButton('Stack',
shortcut=self._shortcuts['stack'], parent=self.main_widget)
self.stbtn.setCheckable(True)
self.stbtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.stbtn.setToolTip('shortcut <b>s</b>')
self.stbtn.clicked.connect(self._drawStack)
self.nbtn = QtWidgets.QPushButton('Norm',
parent=self.main_widget)
self.nbtn.setCheckable(True)
self.nbtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.nbtn.clicked.connect(self._drawStack)
self.ebtn = QtWidgets.QPushButton('ERR',
parent=self.main_widget)
self.ebtn.setCheckable(True)
self.ebtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.ebtn.clicked.connect(self._drawStack)
# Select distance
self.discb = QComboBox(self)
self.discb.activated.connect(self._changeStack)
self._updateWindow()
disEdit = QtWidgets.QPushButton('Edit')
disEdit.resize(disEdit.sizeHint())
disEdit.clicked.connect(self._editDis)
disDelt = QtWidgets.QPushButton('Delete')
disDelt.resize(disEdit.sizeHint())
disDelt.clicked.connect(self._deleteDis)
# Select region
self.regcb = QComboBox(self)
self.regcb.activated.connect(self._changeRegion)
self._updateRegion()
regEdit = QtWidgets.QPushButton('Edit')
regEdit.resize(regEdit.sizeHint())
regEdit.clicked.connect(self._editRegion)
regDelt = QtWidgets.QPushButton('Delete')
regDelt.resize(regDelt.sizeHint())
regDelt.clicked.connect(self._deleteRegion)
# button to plot all regions in one plot
self.allbtn = QPushButton('All Region')
self.allbtn.setCheckable(True)
self.allbtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.allbtn.clicked.connect(self._stackAll)
#reset region button
self.rsbtn = QtWidgets.QPushButton('Reset')
self.rsbtn.clicked.connect(self._resetReg)
self.btnbar = QHBoxLayout()
self.btnbar.addWidget(prv)
self.btnbar.addWidget(nxt)
self.btnbar.addWidget(QLabel('Array'))
self.btnbar.addWidget(self.arcb)
vline = QFrame()
vline.setFrameStyle(QFrame.VLine | QFrame.Raised)
self.btnbar.addWidget(vline)
self.btnbar.addWidget(self.stbtn)
self.btnbar.addWidget(QLabel('Step'))
self.btnbar.addWidget(self.discb)
self.btnbar.addWidget(disEdit)
self.btnbar.addWidget(disDelt)
self.btnbar.addStretch(1)
self.btnbar2 = QHBoxLayout()
self.btnbar2.addWidget(QLabel('Region'))
self.btnbar2.addWidget(self.regcb)
self.btnbar2.addWidget(regEdit)
self.btnbar2.addWidget(regDelt)
self.btnbar2.addWidget(self.allbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.rsbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.nbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.ebtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('Filter'))
self.btnbar2.addWidget(self.filtcb)
self.btnbar2.addStretch(1)
#Menubar
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save Reg', self._saveFile)
def _saveFile(self):
if len(self._region) >= 2:
_region = self._region[1:]
for _reg in _region:
name = _reg['name']
array = self._current_array['name']
_df = self.regDf[name]
savename = os.path.join(array,name+'.pkl')
_df.to_pickle(savename)
def _initArrayList(self):
self._arlist = pd.DataFrame()
import os
import sys
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import fits
import time
import ramp2018_mcmc as wl_ramp2
import ramp2018 as wl_ramp
import batman
from RECTE import RECTE
sys.path.insert(0, '../bin_analysis')
import get_limb as gl
def get_data(visit, x, y, get_raw=0):
folder = '../data_reduction/reduced/%s/final/*.fits' % (visit)
data=np.sort(np.asarray(glob.glob(folder)))
nexposure = len(data)
print('There are %d exposures in this visit' % nexposure)
date=np.zeros(len(data))
icount=np.zeros_like(date)
test=fits.open(data[0])
xlen, ylen = test[0].data.shape
test.close()
xlen-=2*x
ylen-=2*y
alldata=np.ma.zeros((len(data),xlen, ylen))
allerr=np.zeros((len(data),xlen,ylen))
allraw=allerr.copy()
xmin=x
xmax=xlen-x
ymin=y
ymax=ylen-y
for i, img in enumerate(data):
expfile=fits.open(img)
hdr=expfile[0].header
exp=expfile[0].data
mask=expfile[1].data
errs=expfile[2].data
raws=expfile[3].data
expfile.close()
date[i]=(hdr['EXPSTART']+hdr['EXPEND'])/2.
icount[i]=hdr['COUNT']
exptime=hdr['EXPTIME']
expo=exp[xmin:xmax, ymin:ymax]
mask=mask[xmin:xmax, ymin:ymax]
errs=errs[xmin:xmax, ymin:ymax]
raws=raws[xmin:xmax, ymin:ymax]
alldata[i,:,:]=np.ma.array(expo, mask=mask)
allerr[i,:,:]=errs
allraw[i,:,:]=raws
date_order=np.argsort(date)
date=date[date_order]
icount=icount[date_order]
alldata=alldata[date_order,:,:]
allerr=allerr[date_order,:,:]
allraw=allraw[date_order, :,:]
if get_raw != 0:
return allraw, exptime
else:
return date, icount, alldata, allerr, allraw, exptime
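# Example call (a sketch; the planet/visit/direction names are illustrative and must match
# a folder under ../data_reduction/reduced/):
#
#   date, icount, alldata, allerr, allraw, exptime = get_data('hatp41/visit01/forward', 5, 10)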
def event_time(date, properties):
"""Program to determine the expected event time
Inputs
date: 1D array of the date of each exposure (MJD)
properties: 1D array containing the last observed eclipse
and the period. (MJD, days)"""
time=properties[1]
period=properties[4]
while time < date[0]:
time+=period
return float(time)
def get_orbits(date):
"""Procedure to organize light curve data by HST orbit"""
orbit=np.zeros(1).astype(int)
for i in range(len(date)-1):
t=date[i+1]-date[i]
if t*86400 > 1200.:
orbit=np.append(orbit, i+1) # a gap longer than 1200 s marks the start of a new HST orbit
return np.append(orbit, len(date))
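# Illustrative example: for a visit whose exposures fall into three HST orbits,
# get_orbits(date) returns the start index of each orbit plus the total number of
# exposures, e.g. array([0, 18, 37, 55]), so orbit i spans date[orbit[i]:orbit[i+1]].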
def inputs(data, transit=True):
""" Function to read in priors for a system.
INPUTS:
data: data table of priors for a particular planet
OUTPUTS:
Returns array of system properties: [rprs, central event time, inc
,a/r, period, depth]
"""
inp_values=pd.read_table(data,sep=' ')
data_arr=inp_values.iloc[:,2].values
labels=inp_values.iloc[:,0].values
param_errs=inp_values.iloc[:,3].values
# Rj-m, Rsolar-m,AU-m, JD -> MJD
conversions=np.array([6.9911e7, 6.957e8, 1.49598e11, -2400000.5])
inc=data_arr[5]
period=data_arr[4]
a_R=data_arr[7]*conversions[2]/(data_arr[1]*conversions[1])
a_R_err=np.sqrt((param_errs[7]*conversions[2]/data_arr[1]/conversions[1])**2
+ (a_R*param_errs[1]/conversions[1])**2)
rprs = data_arr[0]*conversions[0]/(data_arr[1]*conversions[1])
if transit==True:
epoch=data_arr[6]+conversions[3]
depth=rprs*rprs
else:
epoch=data_arr[6]+conversions[3]+period/2.
depth = rprs*rprs*(data_arr[2]/data_arr[3])/3
props=np.zeros(6)
props[0]=rprs
props[1]=epoch
props[2]=inc
props[3]=a_R
props[4]=period
props[5]=depth
errors=np.zeros(6)
errors[0]=0
errors[1]=param_errs[6]
errors[2]=param_errs[5]
errors[3]=a_R_err
errors[4]=param_errs[4]
errors[5]=0
return [props,errors]
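# Example (a sketch; assumes ../planets/<planet>/inputs.dat follows the space-separated
# layout read above):
#
#   props, errs = inputs('../planets/hatp41/inputs.dat', transit=True)
#   rprs, epoch, inc, a_R, period, depth = props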
def intrinsic(date, light, raw, pixels=0):
means=np.zeros(len(light))
for i,l in enumerate(light):
try:
means[i]=np.mean([light[i-3], light[i-2], light[i-1], l])
except IndexError:
means[i]=l
end=np.argmax(means)
begin=end-3
if pixels==0:
raw_img=np.median(raw[begin:end,:,:], axis=0)
else:
raw_img=np.median(raw[begin:end,:,pixels[0]:pixels[1]])
intrinsic=np.median(raw_img)
return intrinsic
# def correction(inputs, date, flux, transit=False):
# params=batman.TransitParams()
# params.w=90.
# params.ecc=0
# params.rp=np.sqrt(inputs[0])
# t0=inputs[1]
# params.inc=inputs[2]
# params.a=inputs[3]
# params.per=inputs[4]
# depth=inputs[5]
# phase = (date-t0)/params.per
# phase = phase - np.floor(phase)
# phase[phase > 0.5] = phase[phase > 0.5] - 1.0
# if transit==True:
# params.t0=t0
# params.u=inputs[6:]
# params.limb_dark="quadratic"
# m=batman.TransitModel(params, date)
# model=m.light_curve(params)
# else:
# params.fp=inputs[5]
# params.t_secondary=t0
# params.u=[]
# params.limb_dark="uniform"
# m=batman.TransitModel(params, date, transittype="secondary")
# model=m.light_curve(params)
# corrected=flux/model
# return corrected
# def remove_bad_data(light_corrected, date1, user_inputs, check=False):
# """Procedure to remove "bad" data from light curve"""
# med= np.ma.median(light_corrected)
# sigma = np.sqrt(np.sum((light_corrected-med)**2)/(2*len(light_corrected)))
# medi=np.zeros_like(date1)+med
# sig3=medi+3*sigma
# sig4=medi+4*sigma
# sig5=medi+5*sigma
# sig3m=medi-3*sigma
# sig4m=medi-4*sigma
# sig5m=medi-5*sigma
# if check==False:
# nPasses=int(user_inputs[3])
# sigma_cut_factor=user_inputs[2]
# else:
# data=plt.plot(date1, light_corrected,'bo',ls='dotted')
# plt.xlabel('MJD')
# plt.ylabel('Total Flux')
# s5=plt.plot(date1, sig5,'pink',date1, sig5m, 'pink')
# s5[0].set_label('5-sigma')
# s4=plt.plot(date1, sig4,'g', date1, sig4m, 'g')
# s4[0].set_label('4-sigma')
# s3=plt.plot(date1, sig3,'r', date1, sig3m, 'r')
# s3[0].set_label('3-sigma')
# plt.plot(date1, medi, label='Median',ls='solid')
# plt.legend(scatterpoints=1)
# plt.show(block=False)
# cut = raw_input("Enter the sigma-cut factor (3-5 recommended): ")
# sigma_cut_factor = float(cut)
# user_inputs[2]=sigma_cut_factor
# plt.close()
# Cut out the "bad" data
# med= np.ma.median(light_corrected)
# sigma = np.sqrt(np.sum((light_corrected-med)**2)/(2*len(light_corrected)))
# dif= np.abs(light_corrected-med)
# index=np.where(dif < sigma_cut_factor*sigma)[0]
# return index
def preprocess_ramp(visit, direction, x=0, y=0, ploton=True
, check=True, inp_file=False, savedata=False
, transit=False):
"""
PURPOSE: Allow user to extract relevant orbital data from reduced time
series of a visit. Also allow user to exclude any outlier data points.
INPUTS
x, y, allow the user to reduce aperture
checks: set to "on" to manually reduce data
If checks is set to on, "user_inputs" will return the inputs
that the user used: [first orbit, last orbit, sigma cut factor,
number of passes, center eclipse time]. If checks is set to off, then
the user_inputs array will be used as inputs (easier to automate) """
date, icount, alldata, allerr, allraw, exptime=get_data(visit+'/'+ direction, x, y)
nexposure=len(date)
planet = visit[:-8]
props, errs=inputs('../planets/%s/inputs.dat' % planet, transit)
a1=gl.get_limb(planet, 14000.,'a1')
a2=gl.get_limb(planet, 14000.,'a2')
a3=gl.get_limb(planet, 14000.,'a3')
a4=gl.get_limb(planet, 14000.,'a4')
props=np.append(props, [a1,a2,a3,a4])
errs=np.append(errs, np.zeros(4))
props_hold=props.copy()
orbit = np.zeros(1)
# Classify the data by each HST orbit. Returns array (orbit)
# which contains the indeces for the start of each orbit
orbit=get_orbits(date)
print("Number of total orbits: %d" % (len(orbit)-1))
# Choose which orbits to include in the eclipse fitting. 1-2 on either
# side of the eclipse is recommended
check2=check
if check == False:
if inp_file == True:
df = pd.read_csv('./preprocess_info.csv')
# ---
# jupyter:
# jupytext:
# formats: py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import os
from plotly.offline import plot
from dash.exceptions import PreventUpdate
from Historic_Crypto import HistoricalData
from Historic_Crypto import Cryptocurrencies
# +
from Historic_Crypto import Cryptocurrencies
data = Cryptocurrencies(coin_search = '', extended_output=True).find_crypto_pairs()
# -
crypto_dict = {}
for base, group in data.groupby('base_currency'):
crypto_dict[base] = list(group.quote_currency.unique())
import os
import pandas as pd
from midiutil import MIDIFile
audiolizer_temp_dir = os.environ.get('AUDIOLIZER_TEMP', './history/')
print('audiolizer temp data:', audiolizer_temp_dir)
granularity = 300 # seconds
def load_date(ticker, granularity, int_):
start_ = int_.left.strftime('%Y-%m-%d-%H-%M')
end_ = int_.right.strftime('%Y-%m-%d-%H-%M')
try:
return HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
except:
print('could not load using {} {}'.format(start_, end_))
raise
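# `refactor` is not defined in this excerpt. Based on get_gaps below it has to resample
# the candle history onto a regular time grid so that missing intervals appear as NaN
# rows. A minimal sketch under that assumption:
def refactor(df, frequency):
    # OHLCV aggregation onto a fixed-frequency index; empty bins yield NaN closes
    return df.resample(frequency).agg(
        {'low': 'min', 'high': 'max', 'open': 'first', 'close': 'last', 'volume': 'sum'})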
def get_gaps(df, granularity):
new_ = refactor(df, '{}s'.format(granularity))
return new_[new_.close.isna()]
# +
def get_history(ticker, start_date, end_date = None, granularity=granularity):
"""Fetch/load historical data from Coinbase API at specified granularity
params:
start_date: (str) (see pandas.to_datetime for acceptable formats)
end_date: (str)
granularity: (int) seconds (default: 300)
price data is saved by ticker and date and stored in audiolizer_temp_dir
"""
start_date = pd.to_datetime(start_date).tz_localize(None)
today = pd.Timestamp.now().tz_localize(None)
if end_date is None:
end_date = today + pd.Timedelta('1D')
else:
end_date = min(today, pd.to_datetime(end_date).tz_localize(None))
fnames = []
for int_ in pd.interval_range(start_date,
end_date):
fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
ticker, int_.left.strftime('%Y-%m-%d'))
if not os.path.exists(fname):
int_df = load_date(ticker, granularity, int_)
int_df.to_csv(fname, compression='gzip')
fnames.append(fname)
df = pd.concat(map(lambda file: pd.read_csv(file,index_col='time', parse_dates=True),
fnames)).drop_duplicates()
gaps = get_gaps(df, granularity)
if len(gaps) > 0:
print('found data gaps')
# fetch the data for each date
for start_date in gaps.groupby(pd.Grouper(freq='1d')):
"""The module for the Nampi data entry form parser.
Classes:
Nampi_data_entry_form
"""
import json
import logging
from datetime import date, datetime
from typing import List, Optional
import gspread
import pandas
from oauth2client.service_account import ServiceAccountCredentials
from pandas import Series
from rdflib.term import URIRef
from modules.appellation import Appellation, Appellation_type
from modules.appellation_assignment import Appellation_assignment
from modules.aspect import Aspect
from modules.author import Author
from modules.birth import Birth
from modules.burial import Burial
from modules.date import Date
from modules.death import Death
from modules.di_act import Di_act
from modules.event import Event
from modules.family import Family
from modules.gettypesandstati import GetTypesAndStati
# from modules.gender import Gender
from modules.group import Group
from modules.nampi_graph import Nampi_graph
from modules.nampi_type import Nampi_type
from modules.person import Person
from modules.place import Place
from modules.source import Source
from modules.source_location import Source_location
from modules.source_type import Source_type
from modules.title import Title
from parsers.nampi_by_josephis.classes.entity_importer_josephis import \
Entity_Importer_Josephis
from parsers.nampi_by_prodomo.classes.date import Dates
from parsers.nampi_data_entry_form.nampi_data_entry_form import (
Table, added_investiture_label, family_member_label)
_types = dict(
Geburt="Geburt",
Abt="Abt",
Beerdigung="Beerdigung",
Beziehung="Beziehung",
Bischof="Bischof",
Geschwister="Geschwister",
Konvent="Konvent",
Pfarrvikar="Pfarrvikar",
Priester="Priester",
Subdiakon="Subdiakon",
Taufe="Taufe",
Tod="Tod",
)
_group_types = {
"Christian denomination": Nampi_type.Mona.christian_denomination,
"Diocese": Nampi_type.Mona.diocese,
"Family": Nampi_type.Mona.family,
"Monastic community": Nampi_type.Mona.monastic_community,
"Parish": Nampi_type.Mona.parish,
"Polity": Nampi_type.Mona.polity,
"Religious denomination": Nampi_type.Mona.religious_denomination,
"Religious order": Nampi_type.Mona.religious_order,
"Religious polity": Nampi_type.Mona.religious_polity,
"Historic diocese": Nampi_type.Mona.historic_diocese,
}
_status_types = {
"Academic degree": Nampi_type.Mona.academic_degree,
"Clergy": Nampi_type.Mona.clergy,
"Community subsacristan": Nampi_type.Mona.community_subsacristan,
"Community superior": Nampi_type.Mona.community_superior,
"Member of a religious community": Nampi_type.Mona.member_of_a_religious_community,
"Member of a religious community with manual focus": Nampi_type.Mona.member_of_a_religious_community_with_manual_focus,
"Member of a religious community with spiritual focus": Nampi_type.Mona.member_of_a_religious_community_with_spiritual_focus,
"Procurator": Nampi_type.Mona.procurator,
"Professed member of a religious community": Nampi_type.Mona.professed_member_of_a_religious_community,
"Vice community superior": Nampi_type.Mona.vice_community_superior,
"Visitator": Nampi_type.Mona.visitator,
"Monastic office with spiritual focus": Nampi_type.Mona.monastic_office_with_spiritual_focus,
"Monastic office with manual focus": Nampi_type.Mona.monastic_office_with_manual_focus,
"Monastic office": Nampi_type.Mona.monastic_office,
"Member of a religious community visiting": Nampi_type.Mona.member_of_a_religious_community_visiting,
"Religious life outside a community": Nampi_type.Mona.religious_life_outside_a_community,
"Office in a diocese": Nampi_type.Mona.office_in_a_diocese,
"Secular office": Nampi_type.Mona.secular_office,
"Educator": Nampi_type.Mona.educator,
"Office": Nampi_type.Mona.office,
"Ruler of a school": Nampi_type.Mona.ruler_of_a_school,
"Status": Nampi_type.Core.status,
"Aspect": Nampi_type.Core.aspect,
"Unspecified aspect": Nampi_type.Mona.unspecified_aspect,
}
_occupation_types = {
"Administration of a community": Nampi_type.Mona.administration_of_a_community,
"Associated parish clergy": Nampi_type.Mona.associated_parish_clergy,
"Clergy": Nampi_type.Mona.clergy,
"Official": Nampi_type.Mona.official,
"Trade": Nampi_type.Mona.trade,
"Rule of a community": Nampi_type.Mona.rule_of_a_community,
"Monastic office": Nampi_type.Mona.monastic_office,
"Secular office": Nampi_type.Mona.secular_office,
"Office in a diocese": Nampi_type.Mona.office_in_a_diocese,
"Office": Nampi_type.Mona.office,
"Educator": Nampi_type.Mona.educator,
"Servant": Nampi_type.Mona.servant,
"Visitator": Nampi_type.Mona.visitator,
"Highly skilled professional": Nampi_type.Mona.highly_skilled_professional,
"Rule of a school": Nampi_type.Mona.rule_of_a_school,
"Occupation": Nampi_type.Core.occupation,
"Aspect": Nampi_type.Core.aspect,
"Unspecified aspect": Nampi_type.Mona.unspecified_aspect,
}
authors = ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]
def safe_str(row: Series, column: str) -> Optional[str]:
return str(row[column]) if column in row else None
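# Example: safe_str(row, "GND") returns the cell rendered as a string when the row has
# a "GND" entry, and None when that column is absent.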
Entities_dict = {}
class Nampi_data_entry_form_parser_josephis:
"""A parser that parses the NAMPI input tables and transforms the data to an RDF graph."""
_graph: Nampi_graph
    _stati: dict
    _occupation: dict
# Get all Entries from Group_Entities Spreadsheet
def getEntities(self):
logging.info("Getting Group_Entites")
# Extract and print all of the values
list_of_hashes = GetTypesAndStati("Josephis").getData()
print("--Start analyzing 'Josephis_Überarbeitungsformular_ASB' --")
i = 0
for val in list_of_hashes:
Entry = Entity_Importer_Josephis()
Entry.ExactCite = val["Exaktes Zitat"]
Entry.Enable = val["Aufnehmen (x)"].strip()
Entry.RelTitle = val["Religious title"]
Entry.Forename = val["Vorname(n)"]
Entry.Surename = val["Nachname"]
Entry.Deathdate = val["Todesdatum"]
Entry.Deathdateearly = val["Todesdatum (frühest)"]
Entry.Deathdatelate = val["Todesdatum (spätest)"]
Entry.Deathplace = val["Todesort"]
Entry.DeathplaceGeo = val["geonames Todesort (populated place)"]
Entry.IssuePlace = val["Wirkungsort"]
Entry.IssuePlacegeo = val["geonames Wirkungsort (populated place)"]
Entry.Community = val["Group/Community"]
Entry.Status = val["Status (mehrere durch % trennen)"]
Entry.Status_Nampi = val["Status_nampi (name)"]
Entry.Occupation = val["Occupation (mehrere durch % trennen)"]
Entry.Occupation_Nampi = val["Occupation_nampi (name)"]
Entry.Event = val["Eventdefinition_nampi (name)"]
Entry.Cite = val["Zitation (Jahr und Tagesdatum)"]
Entry.GND = val["GND"]
Entry.Comment = val["Kommentar"]
Entry.Source = val["Quellenangabe"]
Entities_dict[i] = Entry
i = i + 1
logging.info("Finished Getting Group_Entities")
print("--Ready with 'Josephis_Überarbeitungsformular_ASB' --")
def createJosephis(self):
print("-- Create entries --")
logging.info("-- Create entries --")
for index in Entities_dict:
Entry = Entity_Importer_Josephis()
Entry = Entities_dict[index]
# Just do stuff, if Enable == "X"
if Entry.Enable.upper() == "X":
# Person
Comment = Entry.Comment
persName = Entry.Forename + " " + Entry.Surename
print("Create entry: " + persName)
logging.info("-- Create entry: --" + persName)
person = self.__get_person(persName, Entry.GND)
if Comment:
person.add_comment(Comment)
# check if Deathdate is valid
date = Entry.Deathdate
datefirst = Entry.Deathdateearly
datelast = Entry.Deathdatelate
                # Birth / inits
family_names = Entry.Surename
date_value = ""
                # Use the exact date if available; otherwise take the latest available date
if len(date) > 0:
date_value = date
elif len(datelast) > 0:
date_value = datelast
birth = Birth(
self._graph,
person,
latest_date=date_value,
family_name_label=family_names,
)
birth.add_text(Entry.ExactCite, "la")
self.__insert_di_act(
birth,
(),
authors,
Entry.Source,
Entry.Cite,
self._d1,
)
family = Family(self._graph, family_names)
aspect = Aspect(self._graph, family_names)
                # Only if the surname is filled
if len(family_names) != 0:
become_member_event = Event(
self._graph,
person,
Nampi_type.Core.has_main_participant,
label="Become family member",
)
become_member_event.add_relationship(
Nampi_type.Core.adds_aspect, aspect
)
become_member_event.add_relationship(
Nampi_type.Core.changes_aspect_related_to, family
)
logging.debug("Added 'membership' in family ")
self.__insert_di_act(
become_member_event,
(),
authors,
Entry.Source,
Entry.Cite,
self._d1,
)
                # Death
                # Exact date / earliest date / latest date
death = self.add_deaths(
persName,
(),
Entry.Deathplace,
Entry.DeathplaceGeo,
Entry.Cite,
datefirst,
date,
datelast,
Entry.Source,
)
                # If the event exists, write it
if death:
death.add_text(Entry.ExactCite, "la")
self.__insert_di_act(
death,
(),
authors,
Entry.Source,
Entry.Cite,
self._d1,
)
cite = Entry.Cite
                # Title
if Entry.RelTitle:
RelTitle = Event(
self._graph,
person,
latest_date=date
, label="Get title " + Entry.RelTitle
)
RelTitle.add_text(Entry.ExactCite, "la")
title = Title(
self._graph, Entry.RelTitle, Nampi_type.Mona.religious_title
)
RelTitle.add_relationship(
obj=person, pred=Nampi_type.Core.has_main_participant
)
RelTitle.add_relationship(
obj=title, pred=Nampi_type.Core.adds_aspect
)
self.__insert_di_act(
RelTitle,
(),
authors,
Entry.Source,
Entry.Cite,
self._d1,
)
# Inits
PlaceArray = ""
PlaceGeoArray = ""
GroupArray = ""
StatusArray = ""
StatusNampiArray = ""
OccupationArray = ""
OccupationNampiArray = ""
EventArray = ""
                # Fields may hold several values separated by "%"; str.split keeps the
                # whole value as a single-element list when no "%" is present.
                # Place
                PlaceArray = Entry.IssuePlace.split("%")
                PlaceGeoArray = str(Entry.IssuePlacegeo).split("%")
                # Community
                GroupArray = Entry.Community.split("%")
                # Status
                StatusArray = Entry.Status.split("%")
                # Status Nampi
                StatusNampiArray = Entry.Status_Nampi.split("%")
                # Occupation
                OccupationArray = Entry.Occupation.split("%")
                # Occupation_Nampi
                OccupationNampiArray = Entry.Occupation_Nampi.split("%")
                # Event
                EventArray = Entry.Event.split("%")
for (i, val) in enumerate(GroupArray):
Group = self.__get_group(
"Monastic community",
GroupArray[i].replace('"', ""),
GroupArray[i].replace('"', ""),
)
if len(PlaceArray) > i and len(PlaceGeoArray) > i:
Place = self.__get_place(
PlaceArray[i].strip(), PlaceGeoArray[i].split(" ")[0]
)
strPlace = PlaceArray[i].strip()
elif len(PlaceArray) > 0 and len(PlaceGeoArray) > 0:
Place = self.__get_place(
PlaceArray[-1].strip(), PlaceGeoArray[-1].split(" ")[0]
)
strPlace = PlaceArray[-1].strip()
else:
Place = ""
strPlace = ""
if len(EventArray) > i:
varEvent = EventArray[i]
elif len(EventArray) > 0:
varEvent = EventArray[-1]
else:
varEvent = ""
if len(StatusArray) > i:
varStatus = StatusArray[i]
elif len(StatusArray) > 0:
varStatus = StatusArray[-1]
else:
varStatus = ""
if len(StatusNampiArray) > i:
varStatusNampi = StatusNampiArray[i]
elif len(StatusNampiArray) > 0:
varStatusNampi = StatusNampiArray[-1]
else:
varStatusNampi = ""
varStatusNampi = varStatusNampi.strip()
                    if len(OccupationArray) > i:
varOccupation = OccupationArray[i]
elif len(OccupationArray) > 0:
varOccupation = OccupationArray[-1]
                    if len(OccupationNampiArray) > i:
varOccupation_Nampi = OccupationNampiArray[i]
elif len(OccupationNampiArray) > 0:
varOccupation_Nampi = OccupationNampiArray[-1]
if len(varStatusNampi.strip()) > 0:
if self._stati.getValues()[varStatusNampi.strip()]["Type"]:
type = self._stati.getValues()[varStatusNampi.strip()][
"Type"
]
varStatusType = _status_types[type]
# if self._occupation.getValues()[varStatusNampi.strip()]["Type"]:
# type = self._stati.getValues()[varOccupation_Nampi.strip()]["Type"]
# varOccupationType = _occupation_types[type]
event = None
if len(date) > 0:
event = Event(
self._graph,
person,
Nampi_type.Core.has_main_participant,
varEvent,
(),
Place,
latest_date=date,
)
event.add_text(Entry.ExactCite, "la")
elif len(datelast) > 0:
event = Event(
self._graph,
person,
Nampi_type.Core.has_main_participant,
varEvent,
(),
Place,
earliest_date=datefirst,
latest_date=datelast,
)
event.add_text(Entry.ExactCite, "la")
aspect_label = ""
occupation_type = ""
if (
varStatusNampi or varOccupation_Nampi
) and varStatusNampi == varOccupation_Nampi:
                        aspect_label = varOccupation_Nampi
status_type = varStatusType
occupation_type = "" # varOccupationType
types: List[URIRef] = []
if status_type:
types.append(status_type)
if occupation_type:
types.append(occupation_type)
if event is not None:
aspect = Aspect(self._graph, aspect_label, types)
event.add_relationship(
obj=person, pred=Nampi_type.Core.has_main_participant
)
if Group:
event.add_relationship(
obj=Group,
pred=Nampi_type.Core.changes_aspect_related_to,
)
event.add_relationship(
obj=aspect, pred=Nampi_type.Core.adds_aspect
)
else:
if len(varStatusNampi.strip()) > 0:
status_type = varStatusType
                            aspect_label = varOccupation_Nampi
if event is not None:
if status_type is None:
status_type = Nampi_type.Core.aspect
aspect = Aspect(
self._graph, varStatusNampi, status_type
)
event.add_relationship(
obj=person,
pred=Nampi_type.Core.has_main_participant,
)
if Group:
event.add_relationship(
obj=Group,
pred=Nampi_type.Core.changes_aspect_related_to,
)
event.add_relationship(
obj=aspect, pred=Nampi_type.Core.adds_aspect
)
elif varOccupation_Nampi:
occupation_type = "" # varOccupationType
if event is not None:
aspect = Aspect(
self._graph, varOccupation_Nampi, occupation_type
)
event.add_relationship(
obj=person,
pred=Nampi_type.Core.has_main_participant,
)
if Group:
event.add_relationship(
obj=Group,
pred=Nampi_type.Core.changes_aspect_related_to,
)
event.add_relationship(
obj=aspect, pred=Nampi_type.Core.adds_aspect
)
if event:
self.__insert_di_act(
event,
(),
authors,
Entry.Source,
cite,
self._d1,
)
print("Create entry: " + persName + " ready")
def __init__(self, graph: Nampi_graph):
"""Initialize the class.
Parameters:
graph: The data graph.
"""
self._graph = graph
today = date.today()
self._d1 = today.strftime("%Y-%m-%d")
self._stati = GetTypesAndStati("Statuses")
        self._occupation = GetTypesAndStati("Occupations")
self.getEntities()
def add_deaths(
self,
singleperson,
deathday,
deathplace,
deathgeo,
cite,
deathearlist,
deathexact,
deathlatest,
source,
):
"""
Add all death events from the deaths table.
"""
died_person = self.__get_person(singleperson, ())
if not died_person:
return
death_place = self.__get_place(deathplace.strip(), deathgeo)
if len(deathday) > 0:
death = Death(self._graph, died_person, death_place, exact_date=deathexact)
elif len(deathlatest) > 0:
death = Death(
self._graph,
died_person,
death_place,
earliest_date=deathearlist,
latest_date=deathlatest,
)
else:
death = None
if death:
self.__insert_di_act(
death,
(),
authors,
source,
cite,
self._d1,
)
logging.info("Parsed the deaths")
def __get_group(
self, group_type_desc, group_label: Optional[str], part_of_label
) -> Optional[Group]:
if not group_label:
return None
group_type_label = group_label
group_type = _group_types[group_type_desc]
part_of_group = (
self.__get_group(part_of_label, (), ()) if part_of_label else None
)
group = Group(self._graph, group_label, group_type)
if part_of_group:
group.add_relationship(Nampi_type.Core.is_part_of, part_of_group)
return group
def __get_person(
self, person_label: Optional[str], gnd_id: Optional[str]
) -> Optional[Person]:
if gnd_id and str(gnd_id).upper() != "KEINE":
            # Keep only the first whitespace-separated token of the GND cell.
            gnd = "http://d-nb.info/gnd/" + str(gnd_id).split(" ")[0].strip()
else:
gnd = ""
gender = ""
return Person.optional(self._graph, person_label, gender, gnd)
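    # Illustration (the identifier below is made up): a GND cell such as
    # "118540238 cf. note" keeps only its first token and becomes
    # "http://d-nb.info/gnd/118540238".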
def __get_place(
self, place_label: Optional[str], geoid: Optional[str]
) -> Optional[Place]:
if geoid:
geoname_id = str(geoid).strip()
else:
geoname_id = ""
place_label = place_label.replace('"', "")
return Place.optional(self._graph, place_label.strip(), geoname_id, "")
def __get_source_location(
self, source_label: str, location_text: Optional[str]
) -> Source_location:
source_type_text = "Manuscript"
source_type = None
if source_type_text == "Manuscript":
source_type = Source_type.MANUSCRIPT
elif source_type_text == "Online Resource":
source_type = Source_type.ONLINE_RESOURCE
if not source_type:
raise ValueError(
"Could not find source type for '{}'".format(source_type_text)
)
source = Source(self._graph, source_label, source_type)
        if location_text:
return Source_location(self._graph, source, location_text)
else:
return source
def __insert_di_act(
self,
event: Event,
        row: Series = pandas.Series(),
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect to the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ Em 3
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['Wes McKinney', 'Travis Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
# Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
import pandas as pd
import numpy as np
import scipy.stats
from collections import defaultdict
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer, balanced_accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import warnings
from sklearn.model_selection import GridSearchCV
# from sklearn.exceptions import FitFailedWarning
from sklearn.exceptions import ConvergenceWarning
warnings.simplefilter("ignore", UserWarning)
def mean_ci(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
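# Hedged usage sketch of mean_ci (the scores below are made-up placeholders):
# it returns the sample mean and the half-width of a t-based confidence interval,
# i.e. the interval is (m - h, m + h).
def _example_mean_ci():
    scores = [0.81, 0.79, 0.84, 0.80, 0.82]  # e.g. cross-validated balanced accuracies
    m, h = mean_ci(scores, confidence=0.95)
    print("Average balanced accuracy: %.2f%% ± %.2f%%" % (m * 100, h * 100))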
def classification(data, wap, target, classifier, cv=10):
warnings.filterwarnings("ignore", category=ConvergenceWarning)
if target == 'building':
target = data['BUILDINGID']
elif target == 'floor':
target = data['FLOOR']
elif target == 'room':
target = data['LOC']
if classifier == 'lr':
clf = LogisticRegression(random_state=1)
elif classifier == 'knn':
clf = KNeighborsClassifier(n_neighbors=3, p=2, algorithm='kd_tree')
elif classifier == 'svm':
clf = SVC(kernel='linear', C=0.1, random_state=1)
cvs = cross_val_score(clf, wap, target,
scoring=make_scorer(balanced_accuracy_score), cv=cv)
return cvs
# print("Average balanced accuracy: %.2f%% ± %.2f%%" % (mean_ci(cvs)[0]* 100, mean_ci(cvs)[1] * 100))
def tune_parameter(data, wap, target, classifier, cv=10):
if target == 'building':
target = data['BUILDINGID']
elif target == 'floor':
target = data['FLOOR']
elif target == 'room':
target = data['LOC']
if classifier == 'lr':
clf = LogisticRegression(random_state=1)
params = {'penalty': ['l1', 'l2', 'none'],
'C': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]}
elif classifier == 'knn':
clf = KNeighborsClassifier(algorithm='kd_tree')
params = {'n_neighbors': [1, 3],
'p': [1, 2]}
elif classifier == 'svm':
clf = SVC(kernel='linear', random_state=1)
params = {'C': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]}
gscv = GridSearchCV(clf, params, cv=cv).fit(wap, target)
print("BEST PARAMS:", gscv.best_params_)
top_param = np.argmin(gscv.cv_results_['rank_test_score'])
splits = ['split' + str(i) + '_test_score' for i in range(10)]
scores = [gscv.cv_results_[i][top_param] for i in splits]
return gscv.best_estimator_, scores
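# Hedged usage sketch for tune_parameter(): it returns the refitted best estimator
# together with the per-split test scores of the top-ranked parameter set.
# best_clf, scores = tune_parameter(data, wap, target='room', classifier='svm', cv=10)
# m, h = mean_ci(scores)
# print("Tuned CV accuracy: %.2f%% ± %.2f%%" % (m * 100, h * 100))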
def classification_room_in_cluster(data, clusters, k, classifier, cv=10):
warnings.filterwarnings(action='ignore')
if classifier == 'lr':
clf = LogisticRegression(random_state=1)
elif classifier == 'knn':
clf = KNeighborsClassifier(n_neighbors=3, p=2, algorithm='kd_tree')
elif classifier == 'svm':
clf = SVC(kernel='linear', C=0.1, random_state=1)
accuracy_scores = defaultdict(dict)
sample_count = defaultdict(dict)
room_count = defaultdict(dict)
floor_count = data['FLOOR'].nunique()
building = data.reset_index(drop=True)
cluster_labels = clusters['c' + str(k)]['labels']
df_cluster_labels = pd.DataFrame({"CLUSTER": cluster_labels})
df = pd.concat([building, df_cluster_labels], axis=1)
for f in range(floor_count):
floor = df.loc[df['FLOOR'] == f]
unique_cluster_labels = floor['CLUSTER'].unique()
accuracy = defaultdict(dict)
samples = defaultdict()
unique_rooms = defaultdict()
for cluster_label in unique_cluster_labels:
rooms = floor.loc[floor['CLUSTER'] == cluster_label, 'LOC']
waps = StandardScaler().fit_transform(
floor.loc[floor['CLUSTER'] == cluster_label, :'WAP519']
)
# samples['c' + str(cluster_label)] = len(rooms)
samples['c' + str(cluster_label)] = waps.shape[0]
# unique_rooms['c' + str(cluster_label)] = len(rooms)
unique_rooms['c' + str(cluster_label)] = rooms.nunique()
try:
cvs = cross_val_score(clf, waps, rooms,
scoring=make_scorer(balanced_accuracy_score), cv=cv)
accuracy['c'+ str(cluster_label)] = mean_ci(cvs)
except ValueError:
print("Floor [%d] - Cluster [%d]: No enough samples to be split." % (f, cluster_label))
accuracy['c'+ str(cluster_label)] = np.nan
sample_count['f' + str(f)] = samples
room_count['f' + str(f)] = unique_rooms
accuracy_scores['f' + str(f)] = accuracy
df_sample_count = pd.DataFrame(sample_count, dtype='Int64').T
df_sample_count = df_sample_count[np.sort(df_sample_count.columns)]
df_room_count = pd.DataFrame(room_count, dtype='Int64').T
# df_room_count = pd.DataFrame(room_count).T
df_room_count = df_room_count[np.sort(df_room_count.columns)]
df_accuracy = pd.DataFrame(accuracy_scores).T
df_accuracy = df_accuracy[np.sort(df_accuracy.columns)]
return df_sample_count, df_room_count, df_accuracy
def tune_room_in_cluster(data, clusters, k, classifier, cv=10):
warnings.filterwarnings(action='ignore')
if classifier == 'lr':
clf = LogisticRegression(random_state=1)
params = {'penalty': ['l1', 'l2', 'none'],
'C': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]}
elif classifier == 'knn':
clf = KNeighborsClassifier(algorithm='kd_tree')
params = {'n_neighbors': [1, 3],
'p': [1, 2]}
elif classifier == 'svm':
clf = SVC(kernel='linear', random_state=1)
params = {'C': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]}
floor_count = data['FLOOR'].nunique()
building = data.reset_index(drop=True)
cluster_labels = clusters['c' + str(k)]['labels']
df_cluster_labels = | pd.DataFrame({"CLUSTER": cluster_labels}) | pandas.DataFrame |
import streamlit as st
import pandas as pd
import numpy as np
import datetime
from datetime import timedelta
import plotly.graph_objects as go
timechoice = [' ', '00:00:00', '01:00:00', '02:00:00', '03:00:00',
'04:00:00', '05:00:00', '06:00:00', '07:00:00',
'08:00:00', '09:00:00', '10:00:00', '11:00:00',
'12:00:00', '13:00:00', '14:00:00', '15:00:00',
'16:00:00', '17:00:00', '18:00:00', '19:00:00',
'20:00:00', '21:00:00', '22:00:00', '23:00:00']
t1 = '10:00:00'
t1 = pd.to_datetime(t1)
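# Hedged sketch of how the parsed time might be used further down in this app;
# the widget label and variable names are assumptions, not part of the original snippet.
# chosen = st.selectbox('Start time', timechoice, index=1)
# start = pd.to_datetime(chosen)
# end = start + timedelta(hours=8)
# st.write('Shift ends at', end.strftime('%H:%M:%S'))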
# -*- python -*-
# -*- coding utf-8 -*-
#
# This file is part of GDSCTools software
#
# Copyright (c) 2016 - Wellcome Trust Sanger Institute
# All rights reserved
#
# File author(s): GDSCTools authors
#
# Distributed under the BSD 3-Clause License.
# See accompanying file LICENSE.txt distributed with this software
#
# website: http://github.com/CancerRxGene/gdsctools
#
##############################################################################
"""OmniBEM functions"""
from gdsctools import Reader, GenomicFeatures
import pandas as pd
import pylab
__all__ = ["OmniBEMBuilder"]
class OmniBEMBuilder(object):
"""Utility to create :class:`~gdsctools.readers.GenomicFeatures` instance from BEM data
Starting from an example provided in GDSCTools
(test_omnibem_genomic_alteration.csv.gz), here is the code to get a data
structure compatible with the :class:`GenomicFeature`, which can then be
used as input to the :class:`~gdsctools.anova.ANOVA` class.
See the constructor for the header format.
.. plot::
:include-source:
from gdsctools import gdsctools_data, OmniBEMBuilder, GenomicFeatures
input_data = gdsctools_data("test_omnibem_genomic_alterations.csv.gz")
bem = OmniBEMBuilder(input_data)
# You may filter the data for instance to keep only a set of genes.
# Here, we keep everything
gene_list = bem.df.GENE.unique()
bem.filter_by_gene_list(gene_list)
# Finally, create a MoBEM dataframe
mobem = bem.get_mobem()
# You may filter with other functions(e.g., to keep only Methylation
# features
bem.filter_by_type_list(["Methylation"])
mobem = bem.get_mobem()
# Then, let us create a dataframe that is compatible with
# GenomicFeature. We just need to make sure the columns are correct
mobem[[x for x in mobem.columns if x!="SAMPLE"]]
gf = GenomicFeatures(mobem[[x for x in mobem.columns if x!="SAMPLE"]])
# Now, we can perform an ANOVA analysis:
from gdsctools import ANOVA, ic50_test
an = ANOVA(ic50_test, gf)
results = an.anova_all()
results.volcano()
.. note:: The underlying data is stored in the attribute :attr:`df`.
"""
def __init__(self, genomic_alteration):
""".. rubric:: Constructor
:param str genomic_alteration: a filename in CSV format (gzipped or
not). The format is explained here below.
The input must be a 5-columns CSV file with the following columns::
COSMIC_ID: an integer
TISSUE_TYPE: e.g. Methylation,
SAMPLE: this should correspond to the COSMID_ID
TYPE: Methylation,
GENE: gene name
IDENTIFIER: required for now but may be removed (rows can be
used as identifier indeed)
.. warning:: If GENE is set to NA, we drop it. In the resources shown in
the example here above, this corresponds to 380 rows (out of 60703).
Similarly, if TISSUE is NA, we also drop these rows that is about
3000 rows.
"""
self.df = Reader(genomic_alteration).df
# We drop genes and tissues that are set to NA
self.df.dropna(inplace=True)
# Some column names are changed for convenience
self.df.rename(columns={"COSMIC.ID": "COSMIC_ID",
"TISSUE.TYPE": "TISSUE_TYPE"}, inplace=True)
self._update_unified()
def __len__(self):
return len(self.df)
def _update_unified(self):
"""# Sort genes by number of times they have an alteration
bem.unified.sort_values(by="IDENTIFIER")
# top genes represented multiple times
bem.unified.sort_values(by="IDENTIFIER", ascending=False).head(20)
"""
# In R this is an aggregate function. In Pandas a groupby + aggregate
# http://pandas.pydata.org/pandas-docs/stable/comparison_with_r.html
groups = self.df.groupby(by=["COSMIC_ID", "GENE", "SAMPLE",
"TISSUE_TYPE"], as_index=False)
self.unified = groups[['IDENTIFIER']].aggregate(len)
# Building a summary on cell lines
unique = self.unified[self.unified['COSMIC_ID'].duplicated() == False]
self.frequency = unique.groupby("TISSUE_TYPE")["TISSUE_TYPE"].count()
self.total = self.unified.groupby("TISSUE_TYPE")["TISSUE_TYPE"].count()
df = pd.concat([self.total, self.frequency, self.total/self.frequency], axis=1)
import numpy as np
print("############################################")
print("## 72page. 집약 ")
print("############################################")
import pandas as pd
pd.set_option('display.max_columns', None)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
rsv_cnt_tb=reserve_tb.groupby('hotel_id').size().reset_index()
rsv_cnt_tb.columns=['hotel_id', 'rsv_cnt']
cus_cnt_tb=reserve_tb.groupby('hotel_id')['customer_id'].nunique().reset_index()
cus_cnt_tb.columns=['hotel_id', 'cus_cnt']
print(cus_cnt_tb)
pd.merge(rsv_cnt_tb, cus_cnt_tb, on='hotel_id')
print(rsv_cnt_tb)
print('------------')
import pandas as pd
pd.set_option('display.max_columns', None)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
result=reserve_tb.groupby('hotel_id').agg({'reserve_id':'count', 'customer_id':'nunique'})
result.reset_index(inplace=True)
result.columns=['hotel_id', 'rsv_cnt', 'cus_cnt']
print(result)
print('------------')
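# A sketch of the same aggregation written with named aggregation; this needs
# pandas >= 0.25 and is only an alternative, not part of the original example.
result_named = (reserve_tb
                .groupby('hotel_id')
                .agg(rsv_cnt=('reserve_id', 'count'),
                     cus_cnt=('customer_id', 'nunique'))
                .reset_index())
print(result_named)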
print('Computing SUM values----')
import pandas as pd
pd.set_option('display.max_columns', None)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
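# The rest of this "SUM" example is not included above; a hedged sketch of what it
# typically computes (this assumes the reserve table has a 'total_price' column):
# result = reserve_tb.groupby('hotel_id')['total_price'].sum().reset_index()
# result.columns = ['hotel_id', 'price_sum']
# print(result)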
# -*- coding: utf-8 -*-
"""
Retrieve student e-mails with attachments (1:1)
===============================================
Retrieves files attached to e-mails sent by students, e.g. a project submission.
The program assumes there is only one attachment per student and that all the
e-mails have been archived in one folder of a mailbox, here :epkg:`gmail`.
The folder content must be deleted to refresh the whole set of projects.
Otherwise, the code updates the folder from the latest e-mails
listed in the file *mails.txt*.
Retrieval usually happens in two steps.
The first one fetches every e-mail and builds a first archive
without taking the groups into account. An :epkg:`Excel` file is then created
which looks roughly like this:
.. runpython::
from pandas import DataFrame
df = DataFrame(dict(groupe=[1, 1, 2], mail=['a.a@m', 'b.b@m', 'c.c@m'],
sujet=['sub1', 'sub1', 'sub2']))
print(df)
Everything except this file is then deleted and all the projects are retrieved
a second time so that only one folder per group is created.
.. _script-fetch-students-projets-py:
"""
#########################################
# import
import sys
import os
import pandas
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import keyring
#################################
# Retrieval parameters:
# all the e-mails must be in the same folder
# of the mailbox.
server = "imap.gmail.com"
school = "ASSAS"
date = "1-May-2018"
pattern = "Python_{0}_Projet_2018"
group_def = "groupes.xlsx"
col_subject, col_group, col_mail, col_student = "sujet", "groupe", "mail", "Nom"
if school == 'ENSAE':
do_mail = True
mailfolder = ["ensae/ENSAE_1A"]
dest_folder = os.path.normpath(os.path.abspath(os.path.join(
*([os.path.dirname(__file__)] + ([".."] * 5) + ["_data", "ecole", "ENSAE", "2017-2018", "1A_projet"]))))
print("dest", dest_folder)
elif school == 'ASSAS':
do_mail = True
mailfolder = ["ensae/assas"]
dest_folder = os.path.normpath(os.path.abspath(os.path.join(
*([os.path.dirname(__file__)] + ([".."] * 5) + ["_data", "ecole", "assas", "2017-2018", "projet"]))))
print("dest", dest_folder)
else:
raise NotImplementedError()
###########################
# End of customization.
path_df = os.path.join(dest_folder, group_def)
if os.path.exists(path_df):
df_group = pandas.read_excel(path_df)
import os
import pandas as pd
import matplotlib.pyplot as plt
import shap
import lightgbm as lgb
from sklearn.metrics import average_precision_score
from takaggle.training.model import Model
from takaggle.training.util import Util
# Custom metric usable with LightGBM
# Usage example (to optimize on this function, the params must specify metric: 'None')
# self.model = lgb.train(
# params,
# dtrain,
# num_boost_round=num_round,
# valid_sets=(dtrain, dvalid),
# early_stopping_rounds=early_stopping_rounds,
# verbose_eval=verbose_eval,
# feval=pr_auc
# )
def pr_auc(preds, data):
"""PR-AUCスコア"""
y_true = data.get_label()
score = average_precision_score(y_true, preds)
return "pr_auc", score, True
class ModelLGB(Model):
def train(self, tr_x, tr_y, va_x=None, va_y=None):
# Set up the datasets
validation = va_x is not None
dtrain = lgb.Dataset(tr_x, tr_y, categorical_feature=self.categoricals)
if validation:
dvalid = lgb.Dataset(va_x, va_y, categorical_feature=self.categoricals)
# Set the hyperparameters
params = dict(self.params)
num_round = params.pop('num_round')
verbose_eval = params.pop('verbose_eval')
# Training
if validation:
early_stopping_rounds = params.pop('early_stopping_rounds')
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
self.model = lgb.train(
params,
dtrain,
num_boost_round=num_round,
valid_sets=(dtrain, dvalid),
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
)
else:
watchlist = [(dtrain, 'train')]
self.model = lgb.train(params, dtrain, num_boost_round=num_round, evals=watchlist)
# Version that does not compute SHAP values
def predict(self, te_x):
return self.model.predict(te_x, num_iteration=self.model.best_iteration)
# Version that computes SHAP values
def predict_and_shap(self, te_x, shap_sampling):
fold_importance = shap.TreeExplainer(self.model).shap_values(te_x[:shap_sampling])
valid_prediticion = self.model.predict(te_x, num_iteration=self.model.best_iteration)
return valid_prediticion, fold_importance
def save_model(self, path):
model_path = os.path.join(path, f'{self.run_fold_name}.model')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
Util.dump(self.model, model_path)
def load_model(self, path):
model_path = os.path.join(path, f'{self.run_fold_name}.model')
self.model = Util.load(model_path)
@classmethod
def calc_feature_importance(self, dir_name, run_name, features, n_splits, type='gain'):
"""feature importanceの計算
"""
model_array = []
for i in range(n_splits):
model_path = os.path.join(dir_name, f'{run_name}-fold{i}.model')
model = Util.load(model_path)
model_array.append(model)
if type == 'gain':
# Compute gain-based importance
val_gain = model_array[0].feature_importance(importance_type='gain')
val_gain = pd.Series(val_gain)
for m in model_array[1:]:
s = pd.Series(m.feature_importance(importance_type='gain'))
val_gain = pd.concat([val_gain, s], axis=1)
if n_splits == 1:
val_gain = val_gain.values
df = pd.DataFrame(val_gain, index=features, columns=['importance']).sort_values('importance', ascending=False)
df.to_csv(dir_name + run_name + '_importance_gain.csv')
df = df.sort_values('importance', ascending=True).tail(100)
# Plot output
fig, ax1 = plt.subplots(figsize=(10, 30))
plt.tick_params(labelsize=10)  # font size of the figure labels
# Draw the bar chart
ax1.set_title('feature importance gain')
ax1.set_xlabel('feature importance')
ax1.barh(df.index, df['importance'], label='importance', align="center", alpha=0.6)
# Show the legend
ax1.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0.5, fontsize=12)
# Show the grid (ax1 only)
ax1.grid(True)
plt.tight_layout()
plt.savefig(dir_name + run_name + '_fi_gain.png', dpi=200, bbox_inches="tight")
plt.close()
else:
# Compute the mean across folds
val_mean = val_gain.mean(axis=1)
val_mean = val_mean.values
importance_df_mean = pd.DataFrame(val_mean, index=features, columns=['importance']).sort_values('importance')
# Compute the standard deviation across folds
val_std = val_gain.std(axis=1)
val_std = val_std.values
importance_df_std = pd.DataFrame(val_std, index=features, columns=['importance']).sort_values('importance')
# Merge mean and std
df = pd.merge(importance_df_mean, importance_df_std, left_index=True, right_index=True, suffixes=['_mean', '_std'])
# Compute the coefficient of variation
df['coef_of_var'] = df['importance_std'] / df['importance_mean']
df['coef_of_var'] = df['coef_of_var'].fillna(0)
df = df.sort_values('importance_mean', ascending=False)
df.to_csv(dir_name + run_name + '_importance_gain.csv')
df = df.sort_values('importance_mean', ascending=True).tail(100)
# Plot output
fig, ax1 = plt.subplots(figsize=(10, 30))
plt.tick_params(labelsize=10)  # font size of the figure labels
# Draw the bar charts
ax1.set_title('feature importance gain')
ax1.set_xlabel('feature importance mean & std')
ax1.barh(df.index, df['importance_mean'], label='importance_mean', align="center", alpha=0.6)
ax1.barh(df.index, df['importance_std'], label='importance_std', align="center", alpha=0.6)
# Draw the line chart
ax2 = ax1.twiny()
ax2.plot(df['coef_of_var'], df.index, linewidth=1, color="crimson", marker="o", markersize=8, label='coef_of_var')
ax2.set_xlabel('Coefficient of variation')
# Show the legends (ax2's placed slightly below ax1's)
ax1.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0.5, fontsize=12)
ax2.legend(bbox_to_anchor=(1, 0.94), loc='upper right', borderaxespad=0.5, fontsize=12)
# Show the grid (ax1 only)
ax1.grid(True)
ax2.grid(False)
plt.tight_layout()
plt.savefig(dir_name + run_name + '_fi_gain.png', dpi=200, bbox_inches="tight")
plt.close()
else:
# Compute split-based importance
val_split = self.model_array[0].feature_importance(importance_type='split')
val_split = pd.Series(val_split)
for m in model_array[1:]:
s = pd.Series(m.feature_importance(importance_type='split'))
val_split = pd.concat([val_split, s], axis=1)
if n_splits == 1:
val_split = val_split.values
df = pd.DataFrame(val_split, index=features, columns=['importance'])
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/3/21 17:40
Desc: Eastmoney fund portal (天天基金网) - fund profile - portfolio
http://fundf10.eastmoney.com/ccmx_000001.html
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.utils import demjson
def fund_portfolio_hold_em(symbol: str = "162411", date: str = "2020") -> pd.DataFrame:
"""
Eastmoney fund portal (天天基金网) - fund profile - portfolio - stock holdings
http://fundf10.eastmoney.com/ccmx_000001.html
:param symbol: fund code
:type symbol: str
:param date: query year
:type date: str
:return: stock holdings of the fund
:rtype: pandas.DataFrame
"""
url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx"
params = {
"type": "jjcc",
"code": symbol,
"topline": "200",
"year": date,
"month": "",
"rt": "0.913877030254846",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
soup = BeautifulSoup(data_json["content"], "lxml")
item_label = [
item.text.split("\xa0\xa0")[1]
for item in soup.find_all("h4", attrs={"class": "t"})
]
big_df = pd.DataFrame()
for item in range(len(item_label)):
temp_df = pd.read_html(data_json["content"], converters={"股票代码": str})[item]
del temp_df["相关资讯"]
temp_df["占净值比例"] = temp_df["占净值比例"].str.split("%", expand=True).iloc[:, 0]
temp_df.rename(columns={"持股数(万股)": "持股数", "持仓市值(万元)": "持仓市值"}, inplace=True)
temp_df.rename(columns={"持股数(万股)": "持股数", "持仓市值(万元人民币)": "持仓市值"}, inplace=True)
temp_df["季度"] = item_label[item]
temp_df = temp_df[
[
"序号",
"股票代码",
"股票名称",
"占净值比例",
"持股数",
"持仓市值",
"季度",
]
]
big_df = big_df.append(temp_df, ignore_index=True)
big_df["占净值比例"] = pd.to_numeric(big_df["占净值比例"], errors="coerce")
big_df["持股数"] = pd.to_numeric(big_df["持股数"], errors="coerce")
big_df["持仓市值"] = pd.to_numeric(big_df["持仓市值"], errors="coerce")
big_df["序号"] = range(1, len(big_df) + 1)
return big_df
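# Hedged usage sketch (requires network access to the Eastmoney endpoint; the
# arguments simply mirror the defaults above):
# fund_portfolio_hold_em_df = fund_portfolio_hold_em(symbol="162411", date="2020")
# print(fund_portfolio_hold_em_df)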
def fund_portfolio_bond_hold_em(symbol: str = "000001", date: str = "2021") -> pd.DataFrame:
"""
Eastmoney fund portal (天天基金网) - fund profile - portfolio - bond holdings
http://fundf10.eastmoney.com/ccmx1_000001.html
:param symbol: fund code
:type symbol: str
:param date: query year
:type date: str
:return: bond holdings of the fund
:rtype: pandas.DataFrame
"""
url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx"
params = {
"type": "zqcc",
"code": symbol,
"year": date,
"rt": "0.913877030254846",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
soup = BeautifulSoup(data_json["content"], "lxml")
item_label = [
item.text.split("\xa0\xa0")[1]
for item in soup.find_all("h4", attrs={"class": "t"})
]
big_df = pd.DataFrame()
for item in range(len(item_label)):
temp_df = pd.read_html(data_json["content"], converters={"债券代码": str})[item]
temp_df["占净值比例"] = temp_df["占净值比例"].str.split("%", expand=True).iloc[:, 0]
temp_df.rename(columns={"持仓市值(万元)": "持仓市值"}, inplace=True)
temp_df["季度"] = item_label[item]
temp_df = temp_df[
[
"序号",
"债券代码",
"债券名称",
"占净值比例",
"持仓市值",
"季度",
]
]
big_df = big_df.append(temp_df, ignore_index=True)
big_df["占净值比例"] = pd.to_numeric(big_df["占净值比例"], errors="coerce")
big_df["持仓市值"] = pd.to_numeric(big_df["持仓市值"], errors="coerce")
big_df["序号"] = range(1, len(big_df) + 1)
return big_df
def fund_portfolio_industry_allocation_em(symbol: str = "000001", date: str = "2021") -> pd.DataFrame:
"""
    天天基金网 (fund.eastmoney.com) - fund archives - investment portfolio - industry allocation
    http://fundf10.eastmoney.com/hytz_000001.html
    :param symbol: fund code
    :type symbol: str
    :param date: query year
    :type date: str
    :return: industry allocation
:rtype: pandas.DataFrame
"""
url = "http://api.fund.eastmoney.com/f10/HYPZ/"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'api.fund.eastmoney.com',
'Pragma': 'no-cache',
'Referer': 'http://fundf10.eastmoney.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36',
}
params = {
'fundCode': symbol,
'year': date,
'callback': 'jQuery183006997159478989867_1648016188499',
'_': '1648016377955',
}
r = requests.get(url, params=params, headers=headers)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
temp_list = []
for item in data_json["Data"]["QuarterInfos"]:
temp_list.extend(item["HYPZInfo"])
    temp_df = pd.DataFrame(temp_list)  # api: pandas.DataFrame
from torch.utils.data import DataLoader
from active_learning.augmentations_based import UncertaintySamplingAugmentationBased
from active_learning.badge_sampling import UncertaintySamplingBadge
from active_learning.others import UncertaintySamplingOthers
from active_learning.mc_dropout import UncertaintySamplingMCDropout
from data.isic_dataset import ISICDataset
from data.matek_dataset import MatekDataset
from data.cifar10_dataset import Cifar10Dataset
from data.jurkat_dataset import JurkatDataset
from data.plasmodium_dataset import PlasmodiumDataset
from data.retinopathy_dataset import RetinopathyDataset
import torch
import time
import numpy as np
from utils import create_model_optimizer_scheduler, AverageMeter, accuracy, Metrics, perform_sampling, \
store_logs, save_checkpoint, get_loss, LossPerClassMeter, create_loaders, load_pretrained, \
create_model_optimizer_autoencoder, create_model_optimizer_simclr, print_args
import pandas as pd
from copy import deepcopy
torch.autograd.set_detect_anomaly(True)
'''
FixMatch implementation, adapted from:
Courtesy to: https://github.com/kekmodel/FixMatch-pytorch
'''
class FixMatch:
def __init__(self, args, verbose=True, uncertainty_sampling_method='random_sampling'):
self.args = args
self.verbose = verbose
self.datasets = {'matek': MatekDataset, 'cifar10': Cifar10Dataset, 'plasmodium': PlasmodiumDataset,
'jurkat': JurkatDataset, 'isic': ISICDataset, 'retinopathy': RetinopathyDataset}
self.model = None
self.kwargs = {'num_workers': 4, 'pin_memory': False, 'drop_last': True}
self.uncertainty_sampling_method = uncertainty_sampling_method
self.init = self.args.semi_supervised_init
def main(self):
if self.uncertainty_sampling_method == 'mc_dropout':
uncertainty_sampler = UncertaintySamplingMCDropout()
self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
elif self.uncertainty_sampling_method == 'augmentations_based':
uncertainty_sampler = UncertaintySamplingAugmentationBased()
self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
elif self.uncertainty_sampling_method == 'random_sampling':
uncertainty_sampler = None
self.args.weak_supervision_strategy = "random_sampling"
elif self.uncertainty_sampling_method == 'badge':
uncertainty_sampler = UncertaintySamplingBadge()
self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
else:
uncertainty_sampler = UncertaintySamplingOthers(verbose=True,
uncertainty_sampling_method=
self.uncertainty_sampling_method)
self.args.weak_supervision_strategy = 'semi_supervised_active_learning'
dataset_cls = self.datasets[self.args.dataset](root=self.args.root,
add_labeled=self.args.add_labeled,
advanced_transforms=True,
merged=self.args.merged,
remove_classes=self.args.remove_classes,
oversampling=self.args.oversampling,
unlabeled_subset_ratio=self.args.unlabeled_subset,
expand_labeled=self.args.fixmatch_k_img,
expand_unlabeled=self.args.fixmatch_k_img*self.args.fixmatch_mu,
unlabeled_augmentations=True if
self.uncertainty_sampling_method == 'augmentations_based'
else False,
seed=self.args.seed, start_labeled=self.args.start_labeled)
base_dataset, labeled_dataset, unlabeled_dataset, labeled_indices, unlabeled_indices, test_dataset = \
dataset_cls.get_dataset()
train_loader, unlabeled_loader, val_loader = create_loaders(self.args, labeled_dataset, unlabeled_dataset,
test_dataset,
labeled_indices, unlabeled_indices, self.kwargs,
dataset_cls.unlabeled_subset_num)
labeled_dataset_fix, unlabeled_dataset_fix = dataset_cls.get_datasets_fixmatch(base_dataset, labeled_indices,
unlabeled_indices)
self.args.lr = 0.0003
model, optimizer, _ = create_model_optimizer_scheduler(self.args, dataset_cls)
if self.init == 'pretrained':
print("Loading ImageNet pretrained model!")
model = load_pretrained(model)
elif self.init == 'autoencoder':
model, optimizer, _ = create_model_optimizer_autoencoder(self.args, dataset_cls)
elif self.init == 'simclr':
model, optimizer, _, _ = create_model_optimizer_simclr(self.args, dataset_cls)
labeled_loader_fix = DataLoader(dataset=labeled_dataset_fix, batch_size=self.args.batch_size,
shuffle=True, **self.kwargs)
unlabeled_loader_fix = DataLoader(dataset=unlabeled_dataset_fix, batch_size=self.args.batch_size,
shuffle=True, **self.kwargs)
criterion_labeled = get_loss(self.args, dataset_cls.labeled_class_samples, reduction='none')
criterion_unlabeled = get_loss(self.args, dataset_cls.labeled_class_samples, reduction='none')
criterions = {'labeled': criterion_labeled, 'unlabeled': criterion_unlabeled}
model.zero_grad()
best_recall, best_report, last_best_epochs = 0, None, 0
best_model = deepcopy(model)
metrics_per_cycle = pd.DataFrame([])
metrics_per_epoch = pd.DataFrame([])
num_class_per_cycle = pd.DataFrame([])
print_args(self.args)
self.args.start_epoch = 0
current_labeled = dataset_cls.start_labeled
for epoch in range(self.args.start_epoch, self.args.fixmatch_epochs):
train_loader_fix = zip(labeled_loader_fix, unlabeled_loader_fix)
train_loss = self.train(train_loader_fix, model, optimizer, epoch, len(labeled_loader_fix), criterions,
base_dataset.classes, last_best_epochs)
val_loss, val_report = self.validate(val_loader, model, last_best_epochs, criterions)
is_best = val_report['macro avg']['recall'] > best_recall
last_best_epochs = 0 if is_best else last_best_epochs + 1
val_report = pd.concat([val_report, train_loss, val_loss], axis=1)
            metrics_per_epoch = pd.concat([metrics_per_epoch, val_report])  # api: pandas.concat
import random
import shutil
import sys
from functools import partial
import numpy as np
import pandas as pd
import torch
from joblib import load
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import IterativeImputer
from sklearn.model_selection import GroupShuffleSplit
from torch.utils.data import TensorDataset
from models.mdn.ensemble import ens_uncertainty_kl, ens_uncertainty_mode, ens_uncertainty_mean, \
MDNEnsemble
from models.mdn.mdn import mog_mode, marginal_mog_log_prob
from models.mdn.train import train_mdn
from models.plotting import make_mog
from models.simple import fit_svm, bin_factor_score, bin_likert
from processing.loading import process_turk_files
from processing.mappings import question_names as all_question_names
from processing.mappings import short_question_names, factor_names
from search.coverage import trajectory_features
from search.util import load_map
def make_mog_test_plots(model, x_test, y_test, model_name=""):
y_test = y_test[["factor0", "factor1", "factor2"]].to_numpy()
test_data = TensorDataset(torch.from_numpy(np.vstack(x_test["features"])).float(), torch.from_numpy(y_test).float())
pi, sigma, mu = model.forward(test_data.tensors[0])
n = 200
x = np.linspace(-3, 3, n)
x_batch = x.reshape([-1, 1])
probs = torch.exp(marginal_mog_log_prob(pi, sigma, mu, torch.Tensor(x_batch))).detach().numpy()
for i in range(len(x_test)):
truth_i = x_test["uuid"] == x_test["uuid"].iloc[i]
truth_y = y_test[truth_i]
fig = make_mog(f"{model_name} traj={x_test['uuid'].iloc[i]}", probs[i], true_points=truth_y)
fig.show()
return
random.seed(0)
np.random.seed(0)
torch.random.manual_seed(0)
def cross_validate_svm():
acc_by_factor = [[], [], []]
mdn_metrics = [[], [], []]
all_metrics = [[], [], []]
for random_state in range(100):
        x, x_test, y, y_test = pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()  # api: pandas.DataFrame
"""Tests for ExtensionDtype Table Schema integration."""
from collections import OrderedDict
import datetime as dt
import decimal
import json
import pytest
from pandas import (
DataFrame,
array,
)
from pandas.core.arrays.integer import Int64Dtype
from pandas.core.arrays.string_ import StringDtype
from pandas.core.series import Series
from pandas.tests.extension.date import (
DateArray,
DateDtype,
)
from pandas.tests.extension.decimal.array import (
DecimalArray,
DecimalDtype,
)
from pandas.io.json._table_schema import (
as_json_table_type,
build_table_schema,
)
class TestBuildSchema:
def test_build_table_schema(self):
df = DataFrame(
{
"A": DateArray([dt.date(2021, 10, 10)]),
"B": DecimalArray([decimal.Decimal(10)]),
"C": array(["pandas"], dtype="string"),
"D": array([10], dtype="Int64"),
}
)
result = build_table_schema(df, version=False)
expected = {
"fields": [
{"name": "index", "type": "integer"},
{"name": "A", "type": "any", "extDtype": "DateDtype"},
{"name": "B", "type": "any", "extDtype": "decimal"},
{"name": "C", "type": "any", "extDtype": "string"},
{"name": "D", "type": "integer", "extDtype": "Int64"},
],
"primaryKey": ["index"],
}
assert result == expected
result = build_table_schema(df)
assert "pandas_version" in result
class TestTableSchemaType:
@pytest.mark.parametrize(
"date_data",
[
DateArray([dt.date(2021, 10, 10)]),
DateArray(dt.date(2021, 10, 10)),
Series(DateArray(dt.date(2021, 10, 10))),
],
)
def test_as_json_table_type_ext_date_array_dtype(self, date_data):
assert as_json_table_type(date_data.dtype) == "any"
def test_as_json_table_type_ext_date_dtype(self):
assert as_json_table_type(DateDtype()) == "any"
@pytest.mark.parametrize(
"decimal_data",
[
DecimalArray([decimal.Decimal(10)]),
Series(DecimalArray([decimal.Decimal(10)])),
],
)
def test_as_json_table_type_ext_decimal_array_dtype(self, decimal_data):
assert as_json_table_type(decimal_data.dtype) == "any"
def test_as_json_table_type_ext_decimal_dtype(self):
assert as_json_table_type(DecimalDtype()) == "any"
@pytest.mark.parametrize(
"string_data",
[
array(["pandas"], dtype="string"),
Series(array(["pandas"], dtype="string")),
],
)
def test_as_json_table_type_ext_string_array_dtype(self, string_data):
        assert as_json_table_type(string_data.dtype) == "any"  # api: pandas.io.json._table_schema.as_json_table_type
import numpy as np
from numpy.random import rand
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import glob as glob
import pandas as pd
import json
from scipy.interpolate import interp1d
from scipy import interp
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from skimage import exposure,img_as_ubyte
from moviepy.editor import VideoClip
from moviepy.editor import ImageSequenceClip
from moviepy.video.io.bindings import mplfig_to_npimage
from skimage import color
import datetime
import time
import argparse
import os
import h5py
import re
def read_files(args):
'''read in, sort by time and output numpy arrays containing each image frame in the simulation'''
#check if hdf5 file exists
for fname in os.listdir(args.fName):
if fname.endswith('h5'):
if fname == 'dsetf.h5':
print('h5 file found')
hf = h5py.File(args.fName+'/'+fname)
fN = pd.DataFrame(list(hf.keys()))
print(len(list(hf.keys())))
# dfN = pd.DataFrame(glob.glob(args.fName+'/defect*.dat'))
params = np.loadtxt('params.txt')
# print(fN)
fN['time'] = fN[0].str.extract(r'(\d*\.?\d*)\.dat').astype('float')
fN.sort_values(by=['time'],inplace=True)
# dfN['time'] = dfN[0].str.extract(r'(\d*\.?\d*)\.dat').astype('float')
# dfN.sort_values(by=['time'],inplace=True)
#Sort fileNames by number
imSeq = [np.array(hf.get(f)) for f in fN[0]]
break
# dimSeq = [np.loadtxt(f) for f in dfN[0]]
else:
fileNames = glob.glob(args.fName+'/out*.dat')
# dfileNames = glob.glob(args.fName+'/defect*.dat')
        fN = pd.DataFrame(fileNames)  # api: pandas.DataFrame
import glob
import shutil
import os
import sys
import subprocess
import tarfile
import argparse
import itertools
import numpy as np
import pandas as pd
import utils
import cmdline
import remixt.seqdataio
import remixt.segalg
import remixt.analysis.haplotype
def calculate_segment_counts(seqdata_filename, segments):
segment_counts = list()
chromosomes = remixt.seqdataio.read_chromosomes(seqdata_filename)
for chrom, chrom_segs in segments.groupby('chromosome'):
try:
chrom_reads = next(remixt.seqdataio.read_read_data(seqdata_filename, chromosome=chrom))
except StopIteration:
chrom_reads = pd.DataFrame(columns=['start', 'end'])
chrom_segs.sort_values('start', inplace=True)
chrom_reads.sort_values('start', inplace=True)
chrom_segs['count'] = remixt.segalg.contained_counts(
chrom_segs[['start', 'end']].values,
chrom_reads[['start', 'end']].values,
)
chrom_segs['count'] = chrom_segs['count'].astype(int)
segment_counts.append(chrom_segs)
segment_counts = pd.concat(segment_counts, ignore_index=True)
return segment_counts
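# Hypothetical input sketch for calculate_segment_counts (assumed layout; the code
# above only relies on the 'chromosome', 'start' and 'end' columns):
# segments = pd.DataFrame({'chromosome': ['1', '1'],
#                          'start': [0, 1000000],
#                          'end': [1000000, 2000000]})
# seg_counts = calculate_segment_counts('tumour_seqdata.h5', segments)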
def calculate_allele_counts(seqdata_filename):
allele_counts = list()
chromosomes = remixt.seqdataio.read_chromosomes(seqdata_filename)
for chrom in chromosomes:
chrom_allele_counts = remixt.analysis.haplotype.read_snp_counts(seqdata_filename, chrom)
chrom_allele_counts['chromosome'] = chrom
allele_counts.append(chrom_allele_counts)
allele_counts = pd.concat(allele_counts, ignore_index=True)
return allele_counts
def write_theta_format_alleles(allele_filename, allele_count):
allele_count['total'] = allele_count['ref_count'] + allele_count['alt_count']
# Nucleotide count columns, dont appear to be used
for nt in 'ACTG':
allele_count[nt] = 0
allele_count = allele_count[[
'chrom_idx',
'position',
'A', 'C', 'T', 'G',
'total',
'ref_count',
'alt_count',
]]
allele_count.to_csv(allele_filename, sep='\t', index=False, header=False)
count_cols = [
'segment_id',
'chrom_idx',
'start',
'end',
'count_tumour',
'count_normal',
'upper_bound',
'lower_bound',
]
class ThetaTool(object):
def __init__(self, install_directory):
self.install_directory = os.path.abspath(install_directory)
self.packages_directory = os.path.join(self.install_directory, 'packages')
self.bin_directory = os.path.join(self.packages_directory, 'THetA', 'bin')
self.theta_bin = os.path.join(self.bin_directory, 'RunTHetA')
self.max_copynumber = 6
def install(self, **kwargs):
Sentinal = utils.SentinalFactory(os.path.join(self.install_directory, 'sentinal_'), kwargs)
with Sentinal('download_theta') as sentinal:
if sentinal.unfinished:
with utils.CurrentDirectory(self.packages_directory):
utils.rmtree('THetA')
subprocess.check_call('git clone https://github.com/amcpherson/THetA.git', shell=True)
with Sentinal('install_theta') as sentinal:
if sentinal.unfinished:
with utils.CurrentDirectory(os.path.join(self.packages_directory, 'THetA')):
subprocess.check_call('rm -rf bin', shell=True)
subprocess.check_call('mkdir bin', shell=True)
subprocess.check_call('cp python/RunTHetA bin', shell=True)
subprocess.check_call('ant', shell=True)
subprocess.check_call('cp python/CreateExomeInput bin', shell=True)
subprocess.check_call('cp matlab/runBAFGaussianModel.m bin', shell=True)
def create_analysis(self, analysis_directory):
return ThetaAnalysis(self, analysis_directory)
class ThetaAnalysis(object):
def __init__(self, tool, analysis_directory):
self.tool = tool
self.analysis_directory = analysis_directory
utils.makedirs(self.analysis_directory)
def get_analysis_filename(self, *names):
return os.path.realpath(os.path.join(self.analysis_directory, *names))
def prepare(self, normal_filename, tumour_filename, perfect_segment_filename=None, **kwargs):
        segments = pd.read_csv(perfect_segment_filename, sep='\t', converters={'chromosome':str})  # api: pandas.read_csv
'''
This file's intended purpose is to assign patient sensitivity designations based
on the traditional (beatAML) method of 20-80 quantile tail assignment.
'''
import pandas as pd
import sys
import numpy as np
SENSITIVE_QUANTILE = 0.2
RESISTANT_QUANTILE = 0.8
if __name__ == '__main__':
_, data_path = sys.argv
data = pd.read_csv(data_path, low_memory=False)
in_shp = data.shape
data.lab_id = data.lab_id.astype(str)
data.auc = data.auc.astype(float)
# in case already has call
if 'call' in data.columns:
data = data.drop(['call'], axis='columns')
#[print(f' {x}') for x in data['auc']]
data2 = data[~pd.isnull(data['auc'])]
    assignments = pd.DataFrame(columns=['inhibitor','auc','lab_id','call'])  # api: pandas.DataFrame
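    # Hedged sketch of the 20-80 quantile tail assignment described in the module
    # docstring (assumed continuation, not part of the original excerpt): per
    # inhibitor, AUCs at or below the 0.2 quantile are called sensitive, at or
    # above the 0.8 quantile resistant, and everything in between intermediate.
    for inhibitor, grp in data2.groupby('inhibitor'):
        lo = grp['auc'].quantile(SENSITIVE_QUANTILE)
        hi = grp['auc'].quantile(RESISTANT_QUANTILE)
        calls = np.where(grp['auc'] <= lo, 'sensitive',
                         np.where(grp['auc'] >= hi, 'resistant', 'intermediate'))
        chunk = grp[['inhibitor', 'auc', 'lab_id']].copy()
        chunk['call'] = calls
        assignments = pd.concat([assignments, chunk], ignore_index=True)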
from flask import Flask, request
import pandas as pd
import numpy as np
import json
import pickle
import os
app = Flask(__name__)
# Load model and Scaler files
model_path = os.path.join(os.path.pardir, os.path.pardir, 'models')
model_file_path = os.path.join(model_path, 'lr_model.pkl')
scaler_file_path = os.path.join(model_path, 'lr_scaler.pkl')
with open(scaler_file_path, 'rb') as scaler_file:
new_scaler = pickle.load(scaler_file)
with open(model_file_path, 'rb') as model_file:
new_model = pickle.load(model_file)
#columns
columns = [ u'Age', u'Fare', u'FamilySize', \
u'IsMother', u'IsMale', u'Deck_A', u'Deck_B', u'Deck_C', u'Deck_D', \
u'Deck_E', u'Deck_F', u'Deck_G', u'Deck_Z', u'Pclass_1', u'Pclass_2', \
u'Pclass_3', u'Title_Lady', u'Title_Master', u'Title_Miss', u'Title_Mr', \
u'Title_Mrs', u'Title_Officer', u'Title_Sir', u'Fare_Bin_very_low', \
u'Fare_Bin_low', u'Fare_Bin_high', u'Fare_Bin_very_high', u'Embarked_C', \
u'Embarked_Q', u'Embarked_S', u'AgeState_Adult', u'AgeState_Child']
@app.route('/api', methods=['POST'])
def make_prediction():
# read json object and convert to json string
data = json.dumps(request.get_json(force=True))
# create pandas dataframe using json string
    df = pd.read_json(data)  # api: pandas.read_json
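    # Assumed continuation sketch (the original snippet stops at read_json): align
    # the incoming frame with the training columns, scale, predict and return JSON.
    # The exact response format here is an assumption, not the original implementation.
    df = df.reindex(columns=columns, fill_value=0)
    X = new_scaler.transform(df[columns].values.astype('float'))
    predictions = new_model.predict(X)
    return json.dumps({'predictions': predictions.tolist()})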
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execition(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
    pd.testing.assert_series_equal(result, expected)  # api: pandas.testing.assert_series_equal
import sys
# import libraries
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
import re
import pickle
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from termcolor import colored
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
def load_data(database_filepath):
"""
Load data containing messages and categries from DB files.
Arguments:
database_filepath -> DB file containing data
Output:
X -> Predictors
y -> Targets
cat_cols -> categories to be predicted
"""
# load table form DB
engine = create_engine("sqlite:///{file}".format(file=database_filepath))
df = pd.read_sql_table('messages', engine)
# select message as predictor
X = df.message.values
# select categories as targets
cat_cols = ['related', 'request', 'offer',
'aid_related', 'medical_help', 'medical_products', 'search_and_rescue',
'security', 'military', 'child_alone', 'water', 'food', 'shelter',
'clothing', 'money', 'missing_people', 'refugees', 'death', 'other_aid',
'infrastructure_related', 'transport', 'buildings', 'electricity',
'tools', 'hospitals', 'shops', 'aid_centers', 'other_infrastructure',
'weather_related', 'floods', 'storm', 'fire', 'earthquake', 'cold',
'other_weather', 'direct_report']
y = df[cat_cols].values
return X, y, cat_cols
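# Hypothetical usage sketch (assumes the SQLite file produced by the companion ETL
# step, with a 'messages' table as read above):
# X, y, category_names = load_data('data/DisasterResponse.db')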
def tokenize(text):
"""
Function to replace URL strings, tokenize and lemmatize sentences.
Arguments:
text -> one message from the database as text
Output:
clean_tokens -> prepared text
"""
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
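# Hypothetical usage sketch: URLs are swapped for a placeholder and tokens become
# lower-cased lemmas, e.g.
# tokenize("Water needed at http://example.com now")
# # -> ['water', 'needed', 'at', 'urlplaceholder', 'now']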
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
Class StartingVerbExtractor
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, x, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(X_tagged)  # api: pandas.DataFrame
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Written by <NAME>. <EMAIL>
import pandas as pd
import json
import urllib
import sys
import warnings
warnings.filterwarnings("ignore")
class SttTransformer(object):
def __init__(self, data):
self.data = data
self.level1 = self._extraction()[0]
self.info = self._extraction()[1]
def _html_tagger(self, string, tag):
assert type(string) is str, print("text input must be string.")
        assert type(tag) is str, print("tag input must be string.")
'''
tag information
--------------------------------------------------------
1. start : we add "<strong>" at the front part of input, which contains start time and speaker information
2. middle : we add "</strong><br>" at the front part of the input, which is usually real script.
3. end : we add "<br><br>" at the last part of the input, which is usually real script.
'''
if tag == 'start':
new_string = "<strong>" + string
return new_string
elif tag == 'middle':
new_string = "</strong><br>" + string
return new_string
elif tag == 'end':
new_string = string + " <br><br>"
return new_string
else:
            print("there is no such {} tag".format(tag))
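    # Hypothetical usage sketch for the tags documented above:
    #   self._html_tagger("spk_0 00:01", "start")   -> "<strong>spk_0 00:01"
    #   self._html_tagger("Hello there.", "middle") -> "</strong><br>Hello there."
    #   self._html_tagger("Hello there.", "end")    -> "Hello there. <br><br>"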
def _extraction(self):
level0 = list(self.data.values())
level1 = list(level0[2].values())
information = dict()
information['full_transcript'] = list(level1[0][0].values())[0]
information['speaker_num'] = level1[1]['speakers']
return level1, information
def _segmentation(self):
level1 = self.level1
level2 = level1[1]
level3 = level2['segments']
df = pd.DataFrame()
for script in level3:
s = script['start_time']
e = script['end_time']
l = script['speaker_label']
df = df.append([(l, s, e)], ignore_index=False)
df.columns = ['speaker', 'start_time', 'end_time']
return df
def parsing(self):
base_df = self._segmentation()
level1 = self.level1
level2 = level1[2]
df = pd.DataFrame(level2)
num_list = pd.merge(df[['start_time', 'alternatives', 'type']].reset_index().fillna('P'), base_df,\
on='start_time', how='outer').dropna()['index'].values
num_list = list(num_list)
num_list.append(len(df))
script_list = []
for i in range(len(num_list) - 1):
start_row = num_list[i]
next_row = num_list[i + 1]
string = ''
for script in range(start_row, next_row):
content = df.iloc[script, 2][0]['content']
if script == start_row:
string = string + content
else:
if df.iloc[script, -1] == 'pronunciation':
string = string + ' ' + content
elif df.iloc[script, -1] == 'punctuation':
string = string + content
script_list.append(string)
final_df = pd.concat([base_df.reset_index().iloc[:, 1:], | pd.DataFrame(script_list, columns=['text']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Functions for printing tables or other text-based results
Authors: <NAME>, <NAME>
URL: https://github.com/adriapr/crowdairway
"""
import pandas as pd
import numpy as np
import os.path
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
import scipy.stats
import analysis as crowdanalysis
#Print statistics about results
def print_result(df_res_valid, df_res_invalid):
num_valid = df_res_valid['result_id'].count()
num_invalid = df_res_invalid['result_id'].count()
num_total = num_valid + num_invalid
print('Total ' + str(num_total))
print('Valid ' + str(num_valid))
print('Invalid ' + str(num_invalid))
print('Invalid + excluded % {0:.{1}f} '.format(num_invalid / num_total * 100, 1))
print('Invalid % preliminary {0:.{1}f} '.format(610/900*100, 1))
# Why are results invalid?
cond_inside = df_res_invalid['inside'] == True
cond_resized = df_res_invalid['resized'] == True
cond_one = df_res_invalid['num_annot'] == 1
cond_two = df_res_invalid['num_annot'] == 2
cond_many = df_res_invalid['num_annot'] >= 3
cond_see = df_res_invalid['cantsee'] == True
# Not inside, not resized, 1 annotation - did not see airway or spam
val2 = df_res_invalid['result_id'].loc[~cond_inside & ~cond_resized & cond_one].count()
val22 = df_res_invalid['result_id'].loc[cond_see].count()
# Not inside and not resized, 2+ annotations - probably tried to annotate
val3 = df_res_invalid['result_id'].loc[~cond_inside & ~cond_resized & (cond_two | cond_many)].count()
# Inside but not resized, or vice versa - probably tried to annotate
val4 = df_res_invalid['result_id'].loc[cond_inside & ~cond_resized].count()
val5 = df_res_invalid['result_id'].loc[~cond_inside & cond_resized].count()
# Resized and inside, but not one pair - technically valid, but now exluded for simpler analysis
val6 = df_res_invalid['result_id'].loc[cond_inside & cond_resized & ~cond_two].count()
print('Only one annotation (did not see airway or did not read): ' + str(val2))
print('Did not see airway: ' + str(val22))
print('Two ore more annotations, but did not read instructions: ' + str(val3+val4+val5))
print('Excluded for simpler analysis: ' + str(val6 / num_total * 100))
print('Truly invalid: ' + str( (num_invalid - val6) / num_total * 100))
#Print statistics about workers
def print_worker(df_res):
res_per_worker=df_res.groupby(['result_creator']).count()[['result_id']]
res = df_res['result_creator'].value_counts(ascending=True)
print('Total workers: ' + str(res_per_worker.count()))
print('Minimum results per worker: ' + str(res.min()))
print('Maximum results per worker: ' + str(res.max()))
def print_corr_table(df_random, df_median, df_best, df_truth, df_res_valid):
"""Print all correlations"""
# Without combining
    df_res_truth = pd.merge(df_res_valid, df_truth, on='task_id', how='outer')  # api: pandas.merge
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 30 09:08:26 2017
@author: hp
"""
'''
SeriousDlqin2yrs: Y/N - borrower experienced 90 days past due delinquency or worse
RevolvingUtilizationOfUnsecuredLines:
    total balance on credit cards and personal lines of credit (excluding real estate
    and installment debt such as car loans) divided by the sum of credit limits
NumberOfTime30-59DaysPastDueNotWorse: number of times the borrower has been 30-59 days past due but no worse
DebtRatio: debt ratio
NumberOfOpenCreditLinesAndLoans:
    number of open loans (installment, e.g. car loan or mortgage) and lines of credit (e.g. credit cards)
NumberOfTimes90DaysLate:
    number of times the borrower has been 90 days or more past due
NumberRealEstateLoansOrLines:
    number of mortgage and real estate loans, including home equity lines of credit
NumberOfTime60-89DaysPastDueNotWorse:
    number of times the borrower has been 60-89 days past due but no worse in the last two years
NumberOfDependents:
    number of dependents in the family, excluding the borrower
'''
import re
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.preprocessing as preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score
from sklearn import model_selection as cross_validation
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn import tree
from sklearn.cluster import KMeans
from sklearn.tree import export_graphviz
import pydotplus
from IPython.display import Image
from sklearn.neighbors import NearestNeighbors
import math
from scipy import stats
from sklearn.utils.multiclass import type_of_target
from sklearn.model_selection import train_test_split
data_train=pd.read_csv('...cs-training.csv')
data_test=pd.read_csv('...cs-test.csv')
data_train=data_train.iloc[:,1:]
data_test=data_test.iloc[:,1:]
data=pd.concat([data_train,data_test])
data.reset_index(inplace=True)
data.drop('index',axis=1,inplace=True)
data=data.reindex(columns=data_train.columns)
# Missing value imputation
data_test[data_test.columns[data_test.isnull().any()].tolist()].isnull().sum()
#monthlyincome
data_nul=data.drop(['SeriousDlqin2yrs','NumberOfDependents'],axis=1)
train=data_nul[(data_nul['MonthlyIncome'].notnull())]
test=data_nul[(data_nul['MonthlyIncome'].isnull())]
train_x=train.drop(['MonthlyIncome'],axis=1)
train_y=train['MonthlyIncome']
test_x=test.drop(['MonthlyIncome'],axis=1)
gbMod = GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=30, subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, init=None,
random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False)
gbMod.fit(train_x,train_y)
m=gbMod.predict(test_x)
gbMod.feature_importances_
new=[]
for x in m :
if x<=0:
new.append(0)
else:
new.append(x)
data.loc[(data['MonthlyIncome'].isnull()),'MonthlyIncome']=new
data_nul=data.drop(['SeriousDlqin2yrs'],axis=1)
train=data_nul[(data_nul['NumberOfDependents'].notnull())]
test=data_nul[(data_nul['NumberOfDependents'].isnull())]
train_x=train.drop(['NumberOfDependents'],axis=1)
train_y=train['NumberOfDependents']
test_x=test.drop(['NumberOfDependents'],axis=1)
gbMod.fit(train_x,train_y)
m=gbMod.predict(test_x)
new=[]
for x in m :
if x<=0:
new.append(0)
else:
new.append(x)
data.loc[(data['NumberOfDependents'].isnull()),'NumberOfDependents']=new
data['fuzhaijine']=data['DebtRatio']*data['MonthlyIncome']
data['shifouweiyue']=(data['NumberOfTime60-89DaysPastDueNotWorse']+data['NumberOfTimes90DaysLate']+data['NumberOfTime30-59DaysPastDueNotWorse'])/(data['NumberOfTime60-89DaysPastDueNotWorse']+data['NumberOfTimes90DaysLate']+data['NumberOfTime30-59DaysPastDueNotWorse'])
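# Engineered features: 'fuzhaijine' (debt amount) = DebtRatio * MonthlyIncome.
# 'shifouweiyue' (ever-delinquent flag) divides the summed past-due counts by
# themselves, so the ratio is 1 whenever any past-due count is > 0 and NaN
# (0/0) when all counts are 0; the loop below then maps 1 -> 1 and NaN -> 0.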
new=[]
for x in data['shifouweiyue']:
if x==1:
new.append(1)
else:
new.append(0)
data['shifouweiyue']=new
data_test=data[data['SeriousDlqin2yrs'].isnull()]
# Over-sampling (SMOTE)
class Smote:
def __init__(self,samples,N=10,k=3):
self.n_samples,self.n_attrs=samples.shape
self.N=N
self.k=k
self.samples=samples
self.newindex=0
# self.synthetic=np.zeros((self.n_samples*N,self.n_attrs))
def over_sampling(self):
N=int(self.N/100)
self.synthetic = np.zeros((self.n_samples * N, self.n_attrs))
neighbors=NearestNeighbors(n_neighbors=self.k).fit(self.samples)
print ('neighbors',neighbors)
for i in range(len(self.samples)):
nnarray=neighbors.kneighbors(self.samples[i].reshape(1,-1),return_distance=False)[0]
#print nnarray
self._populate(N,i,nnarray)
return self.synthetic
# for each minority class samples,choose N of the k nearest neighbors and generate N synthetic samples.
def _populate(self,N,i,nnarray):
for j in range(N):
nn=random.randint(0,self.k-1)
dif=self.samples[nnarray[nn]]-self.samples[i]
gap=random.random()
self.synthetic[self.newindex]=self.samples[i]+gap*dif
self.newindex+=1
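# Note: the Smote class above follows the original SMOTE idea - for each
# minority-class sample, pick one of its k nearest neighbours and interpolate
# a synthetic point between the two. N is expressed in percent, so N=500 below
# produces roughly 5 synthetic samples per original minority sample.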
a=np.array(data.iloc[:len(data_train),:][data['SeriousDlqin2yrs']==1])
s=Smote(a,N=500)
data_train_sampling=s.over_sampling()
data_train_sampling=pd.DataFrame(data_train_sampling,columns=list(data.columns))
# Random sampling of negative (non-default) samples
data_train_samplingz=(data.iloc[:len(data_train),:][data_train['SeriousDlqin2yrs']==0]).sample(n=60000)
train_data_sampling=pd.concat([data_train_sampling,data_train_samplingz,data.iloc[:len(data_train),:][data_train['SeriousDlqin2yrs']==1]])
train_data_sampling[['SeriousDlqin2yrs','age','NumberOfTime30-59DaysPastDueNotWorse','NumberOfOpenCreditLinesAndLoans','NumberOfTimes90DaysLate',
'NumberRealEstateLoansOrLines','NumberOfTime60-89DaysPastDueNotWorse','NumberOfDependents']] = train_data_sampling[['SeriousDlqin2yrs','age','NumberOfTime30-59DaysPastDueNotWorse','NumberOfOpenCreditLinesAndLoans','NumberOfTimes90DaysLate',
'NumberRealEstateLoansOrLines','NumberOfTime60-89DaysPastDueNotWorse','NumberOfDependents']].round()
train_data_sampling['SeriousDlqin2yrs'].value_counts()
train_data_sampling.to_csv('C:/Users/hp/Desktop/在家学习/信用评分/sampling.csv')
cut_data=pd.DataFrame()
#age
train_data_sampling=train_data_sampling.drop(data[data['age']==0].index)
train_data_sampling.reset_index(inplace=True)
train_data_sampling.drop('index',axis=1,inplace=True)
k_age=KMeans(n_clusters=9,random_state=4,init='random')
k_age.fit_transform(train_data_sampling[['age']])
k_age.cluster_centers_#28 35 40 47 54 61 68 77 86
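# The sorted (rounded) KMeans cluster centres listed above are reused as
# manual bin edges for pd.cut below.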
cut_age=pd.cut(train_data_sampling.age,bins=[0,28,35,40,47,54,61,68,77,86,110])
pd.crosstab(train_data_sampling.loc[:,'age'],train_data_sampling.loc[:,'SeriousDlqin2yrs'])
# RevolvingUtilizationOfUnsecuredLines: keep the raw variable, since its correlation is not high
'''
k_Rev=KMeans(n_clusters=4,random_state=4,init='random')
k_Rev.fit_transform(data[['RevolvingUtilizationOfUnsecuredLines']])
k_Rev.cluster_centers_
'''
cut_Rev=pd.qcut(train_data_sampling.RevolvingUtilizationOfUnsecuredLines,q=5)
cut_Rev.value_counts()
#NumberOfTime30-59DaysPastDueNotWorse
max=train_data_sampling.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']!=98)&(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']!=96),'NumberOfTime30-59DaysPastDueNotWorse'].max()
New=[]
for val in train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']:
if ((val == 98) | (val == 96)):
New.append(max)
else:
New.append(val)
train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']=New
cut_NumberOf3059Time=pd.cut(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse'],bins=[-np.inf,0,1,2,4,np.inf])
#cut_NumberOf3059Time=pd.qcut(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse'],q=5)
cut_NumberOf3059Time.value_counts()
#DebtRatio
cut_ratio=pd.qcut(train_data_sampling.DebtRatio,q=5)
cut_ratio.value_counts()
#MonthlyIncome
cut_income=pd.qcut(train_data_sampling.MonthlyIncome,q=10)
cut_income.value_counts()
#NumberOfOpenCreditLinesAndLoans
train_data_sampling['NumberOfOpenCreditLinesAndLoans'].value_counts()
cut_loans=pd.qcut(train_data_sampling['NumberOfOpenCreditLinesAndLoans'],q=10)
cut_loans.value_counts()
#NumberOfTimes90DaysLate
max=train_data_sampling.loc[(train_data_sampling['NumberOfTimes90DaysLate']!=98)&(train_data_sampling['NumberOfTimes90DaysLate']!=96),'NumberOfTimes90DaysLate'].max()
New=[]
for val in train_data_sampling['NumberOfTimes90DaysLate']:
if ((val == 98) | (val == 96)):
New.append(max)
else:
New.append(val)
train_data_sampling['NumberOfTimes90DaysLate']=New
cut_NumberOf90time=pd.cut(train_data_sampling['NumberOfTimes90DaysLate'],bins=[-np.inf,0,1,2,4,np.inf])
cut_NumberOf90time.value_counts()
#NumberRealEstateLoansOrLines
cut_EstateLoansOrLines=pd.cut(train_data_sampling['NumberRealEstateLoansOrLines'],bins=[-np.inf,0,1,2,4,np.inf])
cut_EstateLoansOrLines.value_counts()
#NumberOfTime60-89DaysPastDueNotWorse
cut_NumberOfTime6089Days=pd.cut(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse'],bins=[-np.inf,0,1,2,4,np.inf])
cut_NumberOfTime6089Days.value_counts()
#NumberOfDependents
cut_Dependents=pd.cut(train_data_sampling['NumberOfDependents'],bins=[-np.inf,0,1,2,np.inf])
cut_Dependents.value_counts()
#fuzhaijine
cut_fuzhaijine=pd.qcut(train_data_sampling['fuzhaijine'],q=5)
cut_fuzhaijine.value_counts()
#shifouweiyue
new=[]
for x in train_data_sampling.shifouweiyue:
if x<0.5:
new.append(0)
else:
new.append(1)
train_data_sampling.shifouweiyue=new
train_data_sampling_cut=train_data_sampling.copy()
train_data_sampling_cut['age']=cut_age
train_data_sampling_cut['RevolvingUtilizationOfUnsecuredLines']=cut_Rev
train_data_sampling_cut['NumberOfTime30-59DaysPastDueNotWorse']=cut_NumberOf3059Time
train_data_sampling_cut['DebtRatio']=cut_ratio
train_data_sampling_cut['MonthlyIncome']=cut_income
train_data_sampling_cut['NumberOfOpenCreditLinesAndLoans']=cut_loans
train_data_sampling_cut['NumberOfTimes90DaysLate']=cut_NumberOf90time
train_data_sampling_cut['NumberRealEstateLoansOrLines']=cut_EstateLoansOrLines
train_data_sampling_cut['NumberOfTime60-89DaysPastDueNotWorse']=cut_NumberOfTime6089Days
train_data_sampling_cut['NumberOfDependents']=cut_Dependents
train_data_sampling_cut['fuzhaijine']=cut_fuzhaijine
train_data_sampling_cut['shifouweiyue']=train_data_sampling['shifouweiyue']
train_data_sampling_cut['SeriousDlqin2yrs'].value_counts()
'''
tree1=tree.DecisionTreeClassifier(max_depth=6,min_samples_split=1000)
#data_tree=pd.concat([train_data_sampling['age'],train_data_sampling['SeriousDlqin2yrs']],axis=1)
tree2=tree1.fit(train_data_sampling[['MonthlyIncome']],train_data_sampling['SeriousDlqin2yrs'])
dot_data = tree.export_graphviz(tree2, out_file=None,
feature_names='MonthlyIncome',
class_names='SeriousDlqin2yrs',
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
#Image(graph.create_png())
graph.write_pdf('C:/Users/hp/Desktop/在家学习/信用评分/w.pdf')
'''
# Compute WOE (weight of evidence)
totalgood = len(train_data_sampling_cut[train_data_sampling_cut['SeriousDlqin2yrs']==0])
totalbad = len(train_data_sampling_cut[train_data_sampling_cut['SeriousDlqin2yrs']==1])
def getwoe(a,p,q):
good=len(train_data_sampling[(a>p)&(a<=q)&(train_data_sampling['SeriousDlqin2yrs']==0)])
bad=len(train_data_sampling[(a>p)&(a<=q)&(train_data_sampling['SeriousDlqin2yrs']==1)])
WOE=np.log((bad/totalbad)/(good/totalgood))
return WOE
def getgoodlen(a,p,q):
good=len(train_data_sampling[(a>p)&(a<=q)&(train_data_sampling['SeriousDlqin2yrs']==0)])
goodlen=good/totalgood
return goodlen
def getbadlen(a,p,q):
bad=len(train_data_sampling[(a>p)&(a<=q)&(train_data_sampling['SeriousDlqin2yrs']==1)])
    badlen=bad/totalbad
return badlen
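# Hedged sketch (not part of the original script): the helpers above implement
# WOE_i = ln((bad_i/total_bad) / (good_i/total_good)) per bin and
# IV = sum_i (bad_i/total_bad - good_i/total_good) * WOE_i.
# A vectorized equivalent for any of the binned series produced by pd.cut /
# pd.qcut above might look like the following; 'binned' is assumed to be one
# of those series, index-aligned with train_data_sampling. Bins with zero
# counts would need special handling (the log becomes +/-inf).
def woe_iv_table(binned, target=train_data_sampling['SeriousDlqin2yrs']):
    counts = pd.crosstab(binned, target)              # rows: bins, columns: 0 / 1
    good_rate = counts.iloc[:, 0] / counts.iloc[:, 0].sum()   # share of goods per bin
    bad_rate = counts.iloc[:, 1] / counts.iloc[:, 1].sum()    # share of bads per bin
    woe = np.log(bad_rate / good_rate)
    iv = ((bad_rate - good_rate) * woe).sum()
    return woe, iv
# e.g. (hypothetical usage): woe_age_tab, iv_age_vec = woe_iv_table(cut_age)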
#data.loc[(data1[data1['MonthlyIncome']>9000]).index,'MonthlyIncome']
woe_train_data=train_data_sampling.copy()
getwoe(train_data_sampling['age'],0,28)
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['age'])
woe_age1=getwoe(train_data_sampling['age'],0,28)
woe_age2=getwoe(train_data_sampling['age'],28,35)
woe_age3=getwoe(train_data_sampling['age'],35,40)
woe_age4=getwoe(train_data_sampling['age'],40,47)
woe_age5=getwoe(train_data_sampling['age'],47,54)
woe_age6=getwoe(train_data_sampling['age'],54,61)
woe_age7=getwoe(train_data_sampling['age'],61,68)
woe_age8=getwoe(train_data_sampling['age'],68,77)
woe_age9=getwoe(train_data_sampling['age'],77,86)
woe_age10=getwoe(train_data_sampling['age'],86,110)
woe_age=[woe_age1,woe_age2,woe_age3,woe_age4,woe_age5,woe_age6,woe_age7,woe_age8,woe_age9,woe_age10]
woe_train_data.loc[train_data_sampling['age']<=28,'age']=woe_age1
woe_train_data.loc[(train_data_sampling['age']>28)&(train_data_sampling['age']<=35),'age']=woe_age2
woe_train_data.loc[(train_data_sampling['age']>35)&(train_data_sampling['age']<=40),'age']=woe_age3
woe_train_data.loc[(train_data_sampling['age']>40)&(train_data_sampling['age']<=47),'age']=woe_age4
woe_train_data.loc[(train_data_sampling['age']>47)&(train_data_sampling['age']<=54),'age']=woe_age5
woe_train_data.loc[(train_data_sampling['age']>54)&(train_data_sampling['age']<=61),'age']=woe_age6
woe_train_data.loc[(train_data_sampling['age']>61)&(train_data_sampling['age']<=68),'age']=woe_age7
woe_train_data.loc[(train_data_sampling['age']>68)&(train_data_sampling['age']<=77),'age']=woe_age8
woe_train_data.loc[(train_data_sampling['age']>77)&(train_data_sampling['age']<=86),'age']=woe_age9
woe_train_data.loc[(train_data_sampling['age']>86)&(train_data_sampling['age']<=111),'age']=woe_age10
woe_train_data.age.value_counts()
iv_age1=(getbadlen(train_data_sampling['age'],0,28)-getgoodlen(train_data_sampling['age'],0,28))*woe_age1
iv_age2=(getbadlen(train_data_sampling['age'],28,35)-getgoodlen(train_data_sampling['age'],28,35))*woe_age2
iv_age3=(getbadlen(train_data_sampling['age'],35,40)-getgoodlen(train_data_sampling['age'],35,40))*woe_age3
iv_age4=(getbadlen(train_data_sampling['age'],40,47)-getgoodlen(train_data_sampling['age'],40,47))*woe_age4
iv_age5=(getbadlen(train_data_sampling['age'],47,54)-getgoodlen(train_data_sampling['age'],47,54))*woe_age5
iv_age6=(getbadlen(train_data_sampling['age'],54,61)-getgoodlen(train_data_sampling['age'],54,61))*woe_age6
iv_age7=(getbadlen(train_data_sampling['age'],61,68)-getgoodlen(train_data_sampling['age'],61,68))*woe_age7
iv_age8=(getbadlen(train_data_sampling['age'],68,77)-getgoodlen(train_data_sampling['age'],68,77))*woe_age8
iv_age9=(getbadlen(train_data_sampling['age'],77,86)-getgoodlen(train_data_sampling['age'],77,86))*woe_age9
iv_age10=(getbadlen(train_data_sampling['age'],86,110)-getgoodlen(train_data_sampling['age'],86,110))*woe_age10
iv_age=iv_age1+iv_age2+iv_age3+iv_age4+iv_age5+iv_age6+iv_age7+iv_age8+iv_age9+iv_age10#0.25819490968759973
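# Common rule of thumb for IV: < 0.02 not predictive, 0.02-0.1 weak,
# 0.1-0.3 medium, > 0.3 strong - so age lands in the medium band here.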
#RevolvingUtilizationOfUnsecuredLines
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['RevolvingUtilizationOfUnsecuredLines'])
woe_Revolving1=np.log((3198/totalbad)/(20834/totalgood))
woe_Revolving2=np.log((6745/totalbad)/(17285/totalgood))
woe_Revolving3=np.log((13531/totalbad)/(10500/totalgood))
woe_Revolving4=np.log((18043/totalbad)/(5989/totalgood))
woe_Revolving5=np.log((18639/totalbad)/(5391/totalgood))
woe_train_data['RevolvingUtilizationOfUnsecuredLines'].max()
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.0535)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>=0),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving1
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.0535)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.281),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving2
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.281)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.652),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving3
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.652)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.967),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving4
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.967)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=60000),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving5
woe_train_data['RevolvingUtilizationOfUnsecuredLines'].value_counts()
iv_Revolv1=(3198/totalbad-20834/totalgood)*woe_Revolving1
iv_Revolv2=(6745/totalbad-17285/totalgood)*woe_Revolving2
iv_Revolv3=(13531/totalbad-10500/totalgood)*woe_Revolving3
iv_Revolv4=(18043/totalbad-5989/totalgood)*woe_Revolving4
iv_Revolv5=(18639/totalbad-5391/totalgood)*woe_Revolving5
iv_Revolv=iv_Revolv1+iv_Revolv2+iv_Revolv3+iv_Revolv4+iv_Revolv5#1.2229730587073095
#NumberOfTime30-59DaysPastDueNotWorse
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfTime30-59DaysPastDueNotWorse'])
woe_30591=np.log((28490/totalbad)/(51935/totalgood))
woe_30592=np.log((16626/totalbad)/(5743/totalgood))
woe_30593=np.log((7862/totalbad)/(1460/totalgood))
woe_30594=np.log((5133/totalbad)/(670/totalgood))
woe_30595=np.log((2045/totalbad)/(191/totalgood))
woe_train_data['NumberOfTime30-59DaysPastDueNotWorse'].max()
woe_train_data.loc[train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']==0,'NumberOfTime30-59DaysPastDueNotWorse']=woe_30591
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']==1),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30592
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']>1)&(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']<=2),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30593
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']>2)&(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']<=4),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30594
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']>4)&(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']<=97),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30595
woe_train_data['NumberOfTime30-59DaysPastDueNotWorse'].value_counts()
iv_30591=(28490/totalbad-51935/totalgood)*woe_30591
iv_30592=(16626/totalbad-5743/totalgood)*woe_30592
iv_30593=(7862/totalbad-1460/totalgood)*woe_30593
iv_30594=(5133/totalbad-670/totalgood)*woe_30594
iv_30595=(2045/totalbad-191/totalgood)*woe_30595
iv_3059=iv_30591+iv_30592+iv_30593+iv_30594+iv_30595#0.83053544388188838
#DebtRatio
woe_train_data['DebtRatio'].max()
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['DebtRatio'])
woe_Ratio1=np.log((10577/totalbad)/(13454/totalgood))
woe_Ratio2=np.log((11320/totalbad)/(12711/totalgood))
woe_Ratio3=np.log((12385/totalbad)/(11646/totalgood))
woe_Ratio4=np.log((14783/totalbad)/(9251/totalgood))
woe_Ratio5=np.log((11091/totalbad)/(12937/totalgood))
woe_train_data.loc[train_data_sampling['DebtRatio']<=0.153,'DebtRatio']=woe_Ratio1
woe_train_data.loc[(train_data_sampling['DebtRatio']>0.153)&(train_data_sampling['DebtRatio']<=0.311),'DebtRatio']=woe_Ratio2
woe_train_data.loc[(train_data_sampling['DebtRatio']>0.311)&(train_data_sampling['DebtRatio']<=0.5),'DebtRatio']=woe_Ratio3
woe_train_data.loc[(train_data_sampling['DebtRatio']>0.5)&(train_data_sampling['DebtRatio']<=1.49),'DebtRatio']=woe_Ratio4
woe_train_data.loc[(train_data_sampling['DebtRatio']>1.49)&(train_data_sampling['DebtRatio']<=400000),'DebtRatio']=woe_Ratio5
woe_train_data['DebtRatio'].value_counts()
iv_Ratio1=(10577/totalbad-13454/totalgood)*woe_Ratio1
iv_Ratio2=(11320/totalbad-12711/totalgood)*woe_Ratio2
iv_Ratio3=(12385/totalbad-11646/totalgood)*woe_Ratio3
iv_Ratio4=(14783/totalbad-9251/totalgood)*woe_Ratio4
iv_Ratio5=(11091/totalbad-12937/totalgood)*woe_Ratio5
iv_Ratio=iv_Ratio1+iv_Ratio2+iv_Ratio3+iv_Ratio4+iv_Ratio5#0.062844824089719628
#MonthlyIncome
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['MonthlyIncome'])
woe_incom1=np.log((6134/totalbad)/(5886/totalgood))
woe_incom2=np.log((5942/totalbad)/(6185/totalgood))
woe_incom3=np.log((7055/totalbad)/(5243/totalgood))
woe_incom4=np.log((7016/totalbad)/(5605/totalgood))
woe_incom5=np.log((6120/totalbad)/(4898/totalgood))
woe_incom6=np.log((6384/totalbad)/(5626/totalgood))
woe_incom7=np.log((6167/totalbad)/(5860/totalgood))
woe_incom8=np.log((5555/totalbad)/(6452/totalgood))
woe_incom9=np.log((5145/totalbad)/(6868/totalgood))
woe_incom10=np.log((4638/totalbad)/(7376/totalgood))
woe_train_data.loc[train_data_sampling['MonthlyIncome']<=1140.342,'MonthlyIncome']=woe_incom1
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>1140.342)&(train_data_sampling['MonthlyIncome']<=1943.438),'MonthlyIncome']=woe_incom2
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>1943.438)&(train_data_sampling['MonthlyIncome']<=2800.0),'MonthlyIncome']=woe_incom3
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>2800.0)&(train_data_sampling['MonthlyIncome']<=3500.0),'MonthlyIncome']=woe_incom4
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>3500.0)&(train_data_sampling['MonthlyIncome']<=4225.0),'MonthlyIncome']=woe_incom5
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>4225.0)&(train_data_sampling['MonthlyIncome']<=5125.153),'MonthlyIncome']=woe_incom6
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>5125.153)&(train_data_sampling['MonthlyIncome']<=6184.002),'MonthlyIncome']=woe_incom7
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>6184.002)&(train_data_sampling['MonthlyIncome']<=7675.0),'MonthlyIncome']=woe_incom8
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>7675.0)&(train_data_sampling['MonthlyIncome']<=10166.0),'MonthlyIncome']=woe_incom9
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>10166.0),'MonthlyIncome']=woe_incom10
woe_train_data.MonthlyIncome.value_counts()
iv_incom1=(6134/totalbad-5886/totalgood)*woe_incom1
iv_incom2=(5942/totalbad-6185/totalgood)*woe_incom2
iv_incom3=(7055/totalbad-5243/totalgood)*woe_incom3
iv_incom4=(7016/totalbad-5605/totalgood)*woe_incom4
iv_incom5=(6120/totalbad-4898/totalgood)*woe_incom5
iv_incom6=(6384/totalbad-5626/totalgood)*woe_incom6
iv_incom7=(6167/totalbad-5860/totalgood)*woe_incom7
iv_incom8=(5555/totalbad-6452/totalgood)*woe_incom8
iv_incom9=(5145/totalbad-6868/totalgood)*woe_incom9
iv_incom10=(4638/totalbad-7376/totalgood)*woe_incom10
iv_incom=iv_incom1+iv_incom2+iv_incom3+iv_incom4+iv_incom5+iv_incom6+iv_incom7+iv_incom8+iv_incom9+iv_incom10#0.05260337229962106
#NumberOfOpenCreditLinesAndLoans
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfOpenCreditLinesAndLoans'])
woe_Loans1=np.log((9379/totalbad)/(4883/totalgood))
woe_Loans2=np.log((8800/totalbad)/(8259/totalgood))
woe_Loans3=np.log((5067/totalbad)/(5146/totalgood))
woe_Loans4=np.log((4660/totalbad)/(5509/totalgood))
woe_Loans5=np.log((4522/totalbad)/(5302/totalgood))
woe_Loans6=np.log((8005/totalbad)/(9696/totalgood))
woe_Loans7=np.log((3590/totalbad)/(3916/totalgood))
woe_Loans8=np.log((5650/totalbad)/(6123/totalgood))
woe_Loans9=np.log((5409/totalbad)/(5627/totalgood))
woe_Loans10=np.log((5074/totalbad)/(5538/totalgood))
woe_train_data.loc[woe_train_data['NumberOfOpenCreditLinesAndLoans']<=2.0,'NumberOfOpenCreditLinesAndLoans']=woe_Loans1
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>2.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=4.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans2
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>4.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=5.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans3
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>5.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=6.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans4
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>6.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=7.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans5
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>7.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=9.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans6
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>9.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=10.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans7
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>10.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=12.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans8
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>12.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=15.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans9
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>15.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans10
woe_train_data.NumberOfOpenCreditLinesAndLoans.value_counts()
iv_Loans1=(9379/totalbad-4883/totalgood)*woe_Loans1
iv_Loans2=(8800/totalbad-8259/totalgood)*woe_Loans2
iv_Loans3=(5067/totalbad-5146/totalgood)*woe_Loans3
iv_Loans4=(4660/totalbad-5509/totalgood)*woe_Loans4
iv_Loans5=(4522/totalbad-5302/totalgood)*woe_Loans5
iv_Loans6=(8005/totalbad-9696/totalgood)*woe_Loans6
iv_Loans7=(3590/totalbad-3916/totalgood)*woe_Loans7
iv_Loans8=(5650/totalbad-6123/totalgood)*woe_Loans8
iv_Loans9=(5409/totalbad-5627/totalgood)*woe_Loans9
iv_Loans10=(5074/totalbad-5538/totalgood)*woe_Loans10
iv_Loans=iv_Loans1+iv_Loans2+iv_Loans3+iv_Loans4+iv_Loans5+iv_Loans6+iv_Loans7+iv_Loans8+iv_Loans9+iv_Loans10#0.061174706202253015
#NumberOfTimes90DaysLate
woe_train_data['NumberOfTimes90DaysLate'].max()
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfTimes90DaysLate'])
woe_901=np.log((38146/totalbad)/(38146/totalgood))
woe_902=np.log((12389/totalbad)/(1521/totalgood))
woe_903=np.log((4774/totalbad)/(344/totalgood))
woe_904=np.log((3085/totalbad)/(179/totalgood))
woe_905=np.log((1762/totalbad)/(95/totalgood))
woe_train_data.loc[train_data_sampling['NumberOfTimes90DaysLate']==0.0,'NumberOfTimes90DaysLate']=woe_901
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']==1.0),'NumberOfTimes90DaysLate']=woe_902
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']>1.0)&(train_data_sampling['NumberOfTimes90DaysLate']<=2.0),'NumberOfTimes90DaysLate']=woe_903
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']>2.0)&(train_data_sampling['NumberOfTimes90DaysLate']<=4.0),'NumberOfTimes90DaysLate']=woe_904
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']>4.0)&(train_data_sampling['NumberOfTimes90DaysLate']<=97),'NumberOfTimes90DaysLate']=woe_905
woe_train_data.NumberOfTimes90DaysLate.value_counts()
iv_901=(38146/totalbad-38146/totalgood)*woe_901
iv_902=(12389/totalbad-1521/totalgood)*woe_902
iv_903=(4774/totalbad-344/totalgood)*woe_903
iv_904=(3085/totalbad-179/totalgood)*woe_904
iv_905=(1762/totalbad-95/totalgood)*woe_905
iv_90=iv_901+iv_902+iv_903+iv_904+iv_905
#NumberRealEstateLoansOrLines
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberRealEstateLoansOrLines'])
woe_Lines1=np.log((26932/totalbad)/(22100/totalgood))
woe_Lines2=np.log((17936/totalbad)/(21270/totalgood))
woe_Lines3=np.log((10526/totalbad)/(12656/totalgood))
woe_Lines4=np.log((3621/totalbad)/(3429/totalgood))
woe_Lines5=np.log((1141/totalbad)/(544/totalgood))
woe_train_data.loc[train_data_sampling['NumberRealEstateLoansOrLines']<=0.0,'NumberRealEstateLoansOrLines']=woe_Lines1
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>0.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=1.0),'NumberRealEstateLoansOrLines']=woe_Lines2
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>1.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=2.0),'NumberRealEstateLoansOrLines']=woe_Lines3
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>2.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=4.0),'NumberRealEstateLoansOrLines']=woe_Lines4
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>4.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=54),'NumberRealEstateLoansOrLines']=woe_Lines5
woe_train_data.NumberRealEstateLoansOrLines.value_counts()
iv_Lines1=(26932/totalbad-22100/totalgood)*woe_Lines1
iv_Lines2=(17936/totalbad-21270/totalgood)*woe_Lines2
iv_Lines3=(10526/totalbad-12656/totalgood)*woe_Lines3
iv_Lines4=(3621/totalbad-3429/totalgood)*woe_Lines4
iv_Lines5=(1141/totalbad-544/totalgood)*woe_Lines5
iv_Lines=iv_Lines1+iv_Lines2+iv_Lines3+iv_Lines4+iv_Lines5#0.039425418770289836
woe_train_data['NumberRealEstateLoansOrLines'].max()
#NumberOfTime60-89DaysPastDueNotWorse
woe_train_data['NumberOfTime60-89DaysPastDueNotWorse'].min()
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfTime60-89DaysPastDueNotWorse'])
woe_60891=np.log((42678/totalbad)/(57972/totalgood))
woe_60892=np.log((12210/totalbad)/(1653/totalgood))
woe_60893=np.log((3103/totalbad)/(248/totalgood))
woe_60894=np.log((1117/totalbad)/(77/totalgood))
woe_60895=np.log((1048/totalbad)/(49/totalgood))
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=0.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60891
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>0.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=1.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60892
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>1.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=2.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60893
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>2.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=4.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60894
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>4.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=98),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60895
woe_train_data['NumberOfTime60-89DaysPastDueNotWorse'].value_counts()
iv_60891=(42678/totalbad-57972/totalgood)*woe_60891
iv_60892=(12210/totalbad-1653/totalgood)*woe_60892
iv_60893=(3103/totalbad-248/totalgood)*woe_60893
iv_60894=(1117/totalbad-77/totalgood)*woe_60894
iv_60895=(1048/totalbad-49/totalgood)*woe_60895
iv_6089=iv_60891+iv_60892+iv_60893+iv_60894+iv_60895
#NumberOfDependents
woe_train_data['NumberOfDependents'].max()
| pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfDependents']) | pandas.crosstab |
# The following code is based on the ProcHarvester implementation
# See https://github.com/IAIK/ProcHarvester/tree/master/code/analysis%20tool
import pandas as pd
import numpy as np
import config
import math
import distance_computation
def init_dist_matrices(file_contents):
dist_matrices = []
for fileContent in file_contents:
dist_matrices.append(np.full((len(fileContent.records), len(fileContent.records)), np.nan))
return dist_matrices
def predict_based_on_distance_matrix(file_contents, Y_train, test_index, train_index, dist_matrices):
# predict based on k nearest distances majority vote
# If there are multiple input files, then k gets multiplied by the number of files
elsecase = 0
ifcase = 0
# Determine indices of k nearest neighbours
k_nearest_indices_rows = []
for file_cnt, fileContent in enumerate(file_contents):
comp_cnt = 0
dist_array = dist_matrices[file_cnt] # everything NaN at the beginning (160x160)
print("_array).shape", np.array(dist_array).shape)
for training_sample_cnt, row_cnt in enumerate(test_index):
test_to_train_distances = np.zeros(len(train_index)) # 160 measurements, 8-folds -> size 140
for test_sample_cnt, col_cnt in enumerate(train_index):
# lazy computation of distances
dist = dist_array[row_cnt, col_cnt]
if math.isnan(dist):
dist = distance_computation.dtw(fileContent.records[row_cnt], fileContent.records[col_cnt])
dist_array[row_cnt, col_cnt] = dist
dist_array[col_cnt, row_cnt] = dist
comp_cnt += 1
ifcase += 1
else:
elsecase += 1
test_to_train_distances[test_sample_cnt] = dist
sorted_indices = np.argsort(test_to_train_distances)
k_smallest_dist_indices = sorted_indices
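            # Note: despite its name, k_smallest_dist_indices keeps the full
            # sorted index list here; the actual top-k slice is applied later
            # via config.K_NEAREST_CNT when the majority vote is computed.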
print("ifcaes", ifcase)
print("elsecase", elsecase)
print("")
if len(k_nearest_indices_rows) <= training_sample_cnt:
k_nearest_indices_rows.append(k_smallest_dist_indices)
else:
extended = np.append(k_nearest_indices_rows[training_sample_cnt], k_smallest_dist_indices)
k_nearest_indices_rows[training_sample_cnt] = extended
sample_cnt = len(fileContent.records)
print(fileContent.file_name + ": " + str(sample_cnt) + "x" + str(sample_cnt) + " distance matrix - " + str(
comp_cnt) + " computations done")
# Predict based on k nearest indices rows
predictions = []
single_predictions = []
first_pred = []
second_pred = []
third_pred = []
for test_set_cnt in range(0, len(test_index)):
k_smallest_dist_indices = k_nearest_indices_rows[test_set_cnt]
k_nearest_labels = []
for index in k_smallest_dist_indices:
k_nearest_labels.append(Y_train.iloc[index])
k_nearest_labels = pd.Series(k_nearest_labels)
# label_cnts = tmp_lbls.groupby(tmp_lbls, sort=False).count()
label_cnts = k_nearest_labels[0:config.K_NEAREST_CNT].value_counts(sort=False).reindex(
pd.unique(k_nearest_labels))
prediction = label_cnts.idxmax()
predictions.append(prediction)
k_nearest_labels = pd.unique(k_nearest_labels)
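        # pd.unique() de-duplicates while preserving order, so (assuming at
        # least three distinct labels among the neighbours) the first, second
        # and third predictions below are guaranteed to differ; the
        # 'assert False' branches are unreachable guards documenting that.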
for idx in range(0, len(k_nearest_labels)):
add = True
for check_idx in range(0, idx):
if single_predictions[check_idx][len(single_predictions[check_idx]) - 1] == k_nearest_labels[idx]:
add = False
break
if idx >= len(single_predictions):
single_predictions.append([])
if add:
single_predictions[idx].append(k_nearest_labels[idx])
else:
single_predictions[idx].append("already_found")
first_pred.append(k_nearest_labels[0])
if k_nearest_labels[1] != k_nearest_labels[0]:
second_pred.append(k_nearest_labels[1])
else:
assert False
second_pred.append("already_found")
if k_nearest_labels[2] != k_nearest_labels[0] and k_nearest_labels[2] != k_nearest_labels[1]:
third_pred.append(k_nearest_labels[2])
else:
assert False
third_pred.append("already_found")
return | pd.Series(predictions) | pandas.Series |
import os
import csv
import re
import csv
import math
from collections import defaultdict
from scipy.signal import butter, lfilter
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from statistics import mean
from scipy.stats import kurtosis, skew
from sklearn import metrics
from numpy import mean
from numpy import std
class MbientDataParser:
def __init__(self, dir_path, features):
self.dir_path = dir_path
self.features = features
self.participants = 1 # Change suitably
self.trials = 1 # Change suitably
self.falls = ['F']
self.adls = ['NF']
self.data = []
def fetch_data(self):
for participant in range(1, self.participants+1):
for trial in range(1, self.trials+1):
for fall in self.falls:
label = True
accfilename = fall + '_' + str(participant) + '_' + str(trial) + '_acc.csv'
gyrfilename = fall + '_' + str(participant) + '_' + str(trial) + '_gyr.csv'
parserObj = MbientDataManager(accfilename, gyrfilename, self.dir_path, label, self.features)
if self.features:
feature, found = parserObj.read_data()
if found:
self.data.append(feature)
else:
data, found = parserObj.read_data()
if found:
self.data.append([data, label])
for adl in self.adls:
label = False
accfilename = adl + '_' + str(participant) + '_' + str(trial) + '_acc.csv'
gyrfilename = adl + '_' + str(participant) + '_' + str(trial) + '_gyr.csv'
parserObj = MbientDataManager(accfilename, gyrfilename, self.dir_path, label, self.features)
if self.features:
feature, found = parserObj.read_data()
if found:
self.data.append(feature)
else:
data, found = parserObj.read_data()
if found:
self.data.append([data, label])
return self.data
class MbientDataManager:
def __init__(self, accfilename, gyrfilename, filepath, label, features):
self.accfilename = accfilename
self.gyrfilename = gyrfilename
self.filepath = filepath
self.label = label
self.features = features
def read_data(self):
try:
acc_csv_data = pd.read_csv(self.filepath+self.accfilename)
invalid_idx = acc_csv_data.index[acc_csv_data['elapsed (s)'].astype(int) > 100].tolist()
acc_csv_data.drop(index = invalid_idx, inplace=True, axis=0)
acc_csv_data.drop('epoch (ms)', inplace=True, axis=1)
acc_csv_data.drop('time (-11:00)', inplace=True, axis=1)
acc_csv_data.drop('elapsed (s)', inplace=True, axis=1)
gyr_csv_data = pd.read_csv(self.filepath+self.gyrfilename)
invalid_idx_2 = gyr_csv_data.index[gyr_csv_data['elapsed (s)'].astype(int) > 100].tolist()
gyr_csv_data.drop(index = invalid_idx_2, inplace=True, axis=0)
gyr_csv_data.drop('epoch (ms)', inplace=True, axis=1)
gyr_csv_data.drop('time (-11:00)', inplace=True, axis=1)
gyr_csv_data.drop('elapsed (s)', inplace=True, axis=1)
acc_x_data = acc_csv_data['x-axis (g)'].to_numpy()
acc_y_data = acc_csv_data['y-axis (g)'].to_numpy()
acc_z_data = acc_csv_data['z-axis (g)'].to_numpy()
gyr_x_data = gyr_csv_data['x-axis (deg/s)'].to_numpy()
gyr_y_data = gyr_csv_data['y-axis (deg/s)'].to_numpy()
gyr_z_data = gyr_csv_data['z-axis (deg/s)'].to_numpy()
acc_data, gyr_data = pd.DataFrame(), pd.DataFrame()
if not self.features:
main_data = pd.DataFrame()
main_data['x-axis (g)'] = pd.Series(acc_x_data).dropna()
main_data['y-axis (g)'] = pd.Series(acc_y_data).dropna()
main_data['z-axis (g)'] = pd.Series(acc_z_data).dropna()
main_data['x-axis (deg/s)'] = pd.Series(gyr_x_data).dropna()
main_data['y-axis (deg/s)'] = pd.Series(gyr_y_data).dropna()
main_data['z-axis (deg/s)'] = pd.Series(gyr_z_data).dropna()
# print(main_data)
return main_data, True
acc_data['fx'] = pd.Series(self.butterworth_low_pass(acc_x_data, 5.0, 200.0, 4)).dropna()
acc_data['fy'] = pd.Series(self.butterworth_low_pass(acc_y_data, 5.0, 200.0, 4)).dropna()
acc_data['fz'] = pd.Series(self.butterworth_low_pass(acc_z_data, 5.0, 200.0, 4)).dropna()
gyr_data['fx'] = pd.Series(gyr_x_data).dropna()
gyr_data['fy'] = | pd.Series(gyr_y_data) | pandas.Series |
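# Hedged note (not part of the original file): this excerpt is truncated before
# MbientDataManager.butterworth_low_pass is defined. Based on the call sites
# above - self.butterworth_low_pass(data, 5.0, 200.0, 4) - and the
# butter/lfilter imports, a minimal sketch of such a helper might look like
# this; the signature and normalisation are assumptions, not the original code.
def butterworth_low_pass_sketch(data, cutoff_hz, sample_rate_hz, order):
    # Normalise the cutoff relative to the Nyquist frequency, as scipy expects.
    nyquist = 0.5 * sample_rate_hz
    b, a = butter(order, cutoff_hz / nyquist, btype='low')
    return lfilter(b, a, data)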
import logging
import math
import re
from collections import Counter
import numpy as np
import pandas as pd
from certa.utils import diff
def get_original_prediction(r1, r2, predict_fn):
r1r2 = get_row(r1, r2)
return predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
def get_row(r1, r2, lprefix='ltable_', rprefix='rtable_'):
r1_df = pd.DataFrame(data=[r1.values], columns=r1.index)
r2_df = | pd.DataFrame(data=[r2.values], columns=r2.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Demo59_GradientDescentSigmoid.ipynb
# **<NAME>**
We need sound conceptual foundation to be good Machine Learning Artists
## Leggo
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
def sigmoid(x):
return 1/(1+np.exp(-x))
def sigmoid_hat(x):
return sigmoid(x) * (1 - sigmoid(x))
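# sigmoid_hat(x) is the derivative of the sigmoid: sigma'(x) = sigma(x) * (1 - sigma(x)).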
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.datasets import make_blobs
# generate 2d classification dataset
n = 500
X, y = make_moons(n_samples=n, noise=0.1)
# scatter plot, dots colored by class value
df = pd.DataFrame(dict(x=X[:,0], y=X[:,1], label=y))
colors = {0:'red', 1:'blue'}
fig, ax = plt.subplots()
grouped = df.groupby('label')
for key, group in grouped:
group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])
plt.show()
datadict = {'X1': X[:,0],'X2' : X[:,1], 'target': y}
data = | pd.DataFrame(data=datadict) | pandas.DataFrame |
import collections
from distutils.version import LooseVersion
import inspect
import logging
from numbers import Number
import numpy as np
import time
import mlflow
from mlflow.entities import Metric, Param
from mlflow.tracking.client import MlflowClient
from mlflow.utils import _chunk_dict, _truncate_dict
from mlflow.utils.autologging_utils import try_mlflow_log
from mlflow.utils.file_utils import TempDir
from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID
from mlflow.utils.validation import (
MAX_PARAMS_TAGS_PER_BATCH,
MAX_METRICS_PER_BATCH,
MAX_ENTITIES_PER_BATCH,
MAX_ENTITY_KEY_LENGTH,
MAX_PARAM_VAL_LENGTH,
)
_logger = logging.getLogger(__name__)
# The earliest version we're guaranteed to support. Autologging utilities may not work properly
# on scikit-learn older than this version.
_MIN_SKLEARN_VERSION = "0.20.3"
# The prefix to note that all calculated metrics and artifacts are solely based on training datasets
_TRAINING_PREFIX = "training_"
_SAMPLE_WEIGHT = "sample_weight"
# _SklearnArtifact represents a artifact (e.g confusion matrix) that will be computed and
# logged during the autologging routine for a particular model type (eg, classifier, regressor).
_SklearnArtifact = collections.namedtuple(
"_SklearnArtifact", ["name", "function", "arguments", "title"]
)
# _SklearnMetric represents a metric (e.g, precision_score) that will be computed and
# logged during the autologging routine for a particular model type (eg, classifier, regressor).
_SklearnMetric = collections.namedtuple("_SklearnMetric", ["name", "function", "arguments"])
def _get_estimator_info_tags(estimator):
"""
:return: A dictionary of MLflow run tag keys and values
describing the specified estimator.
"""
return {
"estimator_name": estimator.__class__.__name__,
"estimator_class": (estimator.__class__.__module__ + "." + estimator.__class__.__name__),
}
def _get_arg_names(f):
# `inspect.getargspec` doesn't return a wrapped function's argspec
# See: https://hynek.me/articles/decorators#mangled-signatures
return list(inspect.signature(f).parameters.keys())
def _get_args_for_metrics(fit_func, fit_args, fit_kwargs):
"""
Get arguments to pass to metric computations in the following steps.
1. Extract X and y from fit_args and fit_kwargs.
2. If the sample_weight argument exists in fit_func,
extract it from fit_args or fit_kwargs and return (X, y, sample_weight),
otherwise return (X, y)
:param fit_func: A fit function object.
:param fit_args: Positional arguments given to fit_func.
:param fit_kwargs: Keyword arguments given to fit_func.
:returns: A tuple of either (X, y, sample_weight), where `y` and `sample_weight` may be
`None` if the specified `fit_args` and `fit_kwargs` do not specify labels or
a sample weighting.
"""
def _get_Xy(args, kwargs, X_var_name, y_var_name):
# corresponds to: model.fit(X, y)
if len(args) >= 2:
return args[:2]
# corresponds to: model.fit(X, <y_var_name>=y)
if len(args) == 1:
return args[0], kwargs.get(y_var_name)
# corresponds to: model.fit(<X_var_name>=X, <y_var_name>=y)
return kwargs[X_var_name], kwargs.get(y_var_name)
def _get_sample_weight(arg_names, args, kwargs):
sample_weight_index = arg_names.index(_SAMPLE_WEIGHT)
# corresponds to: model.fit(X, y, ..., sample_weight)
if len(args) > sample_weight_index:
return args[sample_weight_index]
# corresponds to: model.fit(X, y, ..., sample_weight=sample_weight)
if _SAMPLE_WEIGHT in kwargs:
return kwargs[_SAMPLE_WEIGHT]
return None
fit_arg_names = _get_arg_names(fit_func)
# In most cases, X_var_name and y_var_name become "X" and "y", respectively.
# However, certain sklearn models use different variable names for X and y.
# E.g., see: https://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html#sklearn.multioutput.MultiOutputClassifier.fit # noqa: E501
X_var_name, y_var_name = fit_arg_names[:2]
Xy = _get_Xy(fit_args, fit_kwargs, X_var_name, y_var_name)
sample_weight = (
_get_sample_weight(fit_arg_names, fit_args, fit_kwargs)
if (_SAMPLE_WEIGHT in fit_arg_names)
else None
)
return (*Xy, sample_weight)
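# Illustration (not part of the original module): for an estimator whose fit
# signature is fit(X, y, sample_weight=None), all supported call styles resolve
# to the same triple, e.g. (hypothetical values):
#   _get_args_for_metrics(model.fit, (X_train, y_train), {"sample_weight": w})
#   _get_args_for_metrics(model.fit, (X_train,), {"y": y_train, "sample_weight": w})
#   # -> both return (X_train, y_train, w)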
def _get_metrics_value_dict(metrics_list):
metric_value_dict = {}
for metric in metrics_list:
try:
metric_value = metric.function(**metric.arguments)
except Exception as e:
_log_warning_for_metrics(metric.name, metric.function, e)
else:
metric_value_dict[metric.name] = metric_value
return metric_value_dict
def _get_classifier_metrics(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Compute and record various common metrics for classifiers
For (1) precision score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html
(2) recall score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html
(3) f1_score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
By default, we choose the parameter `labels` to be `None`, `pos_label` to be `1`,
`average` to be `weighted` to compute the weighted precision score.
For (4) accuracy score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
we choose the parameter `normalize` to be `True` to output the percentage of accuracy,
as opposed to `False` that outputs the absolute correct number of sample prediction
We log additional metrics if certain classifier has method `predict_proba`
(5) log loss:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html
(6) roc_auc_score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
By default, for roc_auc_score, we pick `average` to be `weighted`, `multi_class` to be `ovo`,
to make the output more insensitive to dataset imbalance.
Steps:
1. Extract X and y_true from fit_args and fit_kwargs, and compute y_pred.
2. If the sample_weight argument exists in fit_func (accuracy_score by default
has sample_weight), extract it from fit_args or fit_kwargs as
(y_true, y_pred, ...... sample_weight), otherwise as (y_true, y_pred, ......)
3. return a dictionary of metric(name, value)
:param fitted_estimator: The already fitted classifier
:param fit_args: Positional arguments given to fit_func.
:param fit_kwargs: Keyword arguments given to fit_func.
:return: dictionary of (function name, computed value)
"""
import sklearn
y_pred = fitted_estimator.predict(X)
classifier_metrics = [
_SklearnMetric(
name=prefix + "precision_score",
function=sklearn.metrics.precision_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "recall_score",
function=sklearn.metrics.recall_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "f1_score",
function=sklearn.metrics.f1_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "accuracy_score",
function=sklearn.metrics.accuracy_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, normalize=True, sample_weight=sample_weight
),
),
]
if hasattr(fitted_estimator, "predict_proba"):
y_pred_proba = fitted_estimator.predict_proba(X)
classifier_metrics.extend(
[
_SklearnMetric(
name=prefix + "log_loss",
function=sklearn.metrics.log_loss,
arguments=dict(y_true=y_true, y_pred=y_pred_proba, sample_weight=sample_weight),
),
]
)
if _is_metric_supported("roc_auc_score"):
# For binary case, the parameter `y_score` expect scores must be
# the scores of the class with the greater label.
if len(y_pred_proba[0]) == 2:
y_pred_proba = y_pred_proba[:, 1]
classifier_metrics.extend(
[
_SklearnMetric(
name=prefix + "roc_auc_score",
function=sklearn.metrics.roc_auc_score,
arguments=dict(
y_true=y_true,
y_score=y_pred_proba,
average="weighted",
sample_weight=sample_weight,
multi_class="ovo",
),
),
]
)
return _get_metrics_value_dict(classifier_metrics)
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Draw and record various common artifacts for classifier
For all classifiers, we always log:
(1) confusion matrix:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html
For only binary classifiers, we will log:
(2) precision recall curve:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html
(3) roc curve:
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
Steps:
1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets.
2. If the sample_weight argument exists in fit_func (accuracy_score by default
has sample_weight), extract it from fit_args or fit_kwargs as
(y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)
3. return a list of artifacts path to be logged
:param fitted_estimator: The already fitted regressor
:param fit_args: Positional arguments given to fit_func.
:param fit_kwargs: Keyword arguments given to fit_func.
:return: List of artifacts to be logged
"""
import sklearn
if not _is_plotting_supported():
return []
classifier_artifacts = [
_SklearnArtifact(
name=prefix + "confusion_matrix",
function=sklearn.metrics.plot_confusion_matrix,
arguments=dict(
estimator=fitted_estimator,
X=X,
y_true=y_true,
sample_weight=sample_weight,
normalize="true",
cmap="Blues",
),
title="Normalized confusion matrix",
),
]
# The plot_roc_curve and plot_precision_recall_curve can only be
# supported for binary classifier
if len(set(y_true)) == 2:
classifier_artifacts.extend(
[
_SklearnArtifact(
name=prefix + "roc_curve",
function=sklearn.metrics.plot_roc_curve,
arguments=dict(
estimator=fitted_estimator, X=X, y=y_true, sample_weight=sample_weight,
),
title="ROC curve",
),
_SklearnArtifact(
name=prefix + "precision_recall_curve",
function=sklearn.metrics.plot_precision_recall_curve,
arguments=dict(
estimator=fitted_estimator, X=X, y=y_true, sample_weight=sample_weight,
),
title="Precision recall curve",
),
]
)
return classifier_artifacts
def _get_regressor_metrics(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Compute and record various common metrics for regressors
For (1) (root) mean squared error:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html
(2) mean absolute error:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html
(3) r2 score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html
By default, we choose the parameter `multioutput` to be `uniform_average`
to average outputs with uniform weight.
Steps:
1. Extract X and y_true from fit_args and fit_kwargs, and compute y_pred.
2. If the sample_weight argument exists in fit_func (accuracy_score by default
has sample_weight), extract it from fit_args or fit_kwargs as
(y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)
3. return a dictionary of metric(name, value)
:param fitted_estimator: The already fitted regressor
:param fit_args: Positional arguments given to fit_func.
:param fit_kwargs: Keyword arguments given to fit_func.
:return: dictionary of (function name, computed value)
"""
import sklearn
y_pred = fitted_estimator.predict(X)
regressor_metrics = [
_SklearnMetric(
name=prefix + "mse",
function=sklearn.metrics.mean_squared_error,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
_SklearnMetric(
name=prefix + "mae",
function=sklearn.metrics.mean_absolute_error,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
_SklearnMetric(
name=prefix + "r2_score",
function=sklearn.metrics.r2_score,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
]
# To be compatible with older versions of scikit-learn (below 0.22.2), where
# `sklearn.metrics.mean_squared_error` does not have "squared" parameter to calculate `rmse`,
# we compute it through np.sqrt(<value of mse>)
metrics_value_dict = _get_metrics_value_dict(regressor_metrics)
metrics_value_dict[prefix + "rmse"] = np.sqrt(metrics_value_dict[prefix + "mse"])
return metrics_value_dict
def _log_warning_for_metrics(func_name, func_call, err):
msg = (
func_call.__qualname__
+ " failed. The metric "
+ func_name
+ "will not be recorded."
+ " Metric error: "
+ str(err)
)
_logger.warning(msg)
def _log_warning_for_artifacts(func_name, func_call, err):
msg = (
func_call.__qualname__
+ " failed. The artifact "
+ func_name
+ " will not be recorded."
+ " Artifact error: "
+ str(err)
)
_logger.warning(msg)
def _log_specialized_estimator_content(
fitted_estimator, run_id, prefix, X, y_true=None, sample_weight=None
):
import sklearn
mlflow_client = MlflowClient()
metrics = dict()
if y_true is not None:
try:
if sklearn.base.is_classifier(fitted_estimator):
metrics = _get_classifier_metrics(
fitted_estimator, prefix, X, y_true, sample_weight
)
elif sklearn.base.is_regressor(fitted_estimator):
metrics = _get_regressor_metrics(fitted_estimator, prefix, X, y_true, sample_weight)
except Exception as err:
msg = (
"Failed to autolog metrics for "
+ fitted_estimator.__class__.__name__
+ ". Logging error: "
+ str(err)
)
_logger.warning(msg)
else:
# batch log all metrics
try_mlflow_log(
mlflow_client.log_batch,
run_id,
metrics=[
Metric(key=str(key), value=value, timestamp=int(time.time() * 1000), step=0)
for key, value in metrics.items()
],
)
if sklearn.base.is_classifier(fitted_estimator):
try:
artifacts = _get_classifier_artifacts(
fitted_estimator, prefix, X, y_true, sample_weight
)
except Exception as e:
msg = (
"Failed to autolog artifacts for "
+ fitted_estimator.__class__.__name__
+ ". Logging error: "
+ str(e)
)
_logger.warning(msg)
return
with TempDir() as tmp_dir:
for artifact in artifacts:
try:
display = artifact.function(**artifact.arguments)
display.ax_.set_title(artifact.title)
artifact_path = "{}.png".format(artifact.name)
filepath = tmp_dir.path(artifact_path)
display.figure_.savefig(filepath)
import matplotlib.pyplot as plt
plt.close(display.figure_)
except Exception as e:
_log_warning_for_artifacts(artifact.name, artifact.function, e)
try_mlflow_log(mlflow_client.log_artifacts, run_id, tmp_dir.path())
return metrics
def _log_estimator_content(estimator, run_id, prefix, X, y_true=None, sample_weight=None):
"""
Logs content for the given estimator, which includes metrics and artifacts that might be
tailored to the estimator's type (e.g., regression vs classification). Training labels
are required for metric computation; metrics will be omitted if labels are not available.
:param estimator: The estimator used to compute metrics and artifacts.
:param run_id: The run under which the content is logged.
:param prefix: A prefix used to name the logged content. Typically it's 'training_' for
training-time content and user-controlled for evaluation-time content.
:param X: The data samples.
:param y_true: Labels.
:param sample_weight: Per-sample weights used in the computation of metrics and artifacts.
:return: A dict of the computed metrics.
"""
metrics = _log_specialized_estimator_content(
fitted_estimator=estimator,
run_id=run_id,
prefix=prefix,
X=X,
y_true=y_true,
sample_weight=sample_weight,
)
if hasattr(estimator, "score") and y_true is not None:
try:
# Use the sample weight only if it is present in the score args
score_arg_names = _get_arg_names(estimator.score)
score_args = (
(X, y_true, sample_weight) if _SAMPLE_WEIGHT in score_arg_names else (X, y_true)
)
score = estimator.score(*score_args)
except Exception as e:
msg = (
estimator.score.__qualname__
+ " failed. The 'training_score' metric will not be recorded. Scoring error: "
+ str(e)
)
_logger.warning(msg)
else:
score_key = prefix + "score"
try_mlflow_log(mlflow.log_metric, score_key, score)
metrics[score_key] = score
return metrics
def _get_meta_estimators_for_autologging():
"""
:return: A list of meta estimator class definitions
(e.g., `sklearn.model_selection.GridSearchCV`) that should be included
when patching training functions for autologging
"""
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import Pipeline
return [
GridSearchCV,
RandomizedSearchCV,
Pipeline,
]
def _is_parameter_search_estimator(estimator):
"""
:return: `True` if the specified scikit-learn estimator is a parameter search estimator,
such as `GridSearchCV`. `False` otherwise.
"""
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
parameter_search_estimators = [
GridSearchCV,
RandomizedSearchCV,
]
return any(
[
isinstance(estimator, param_search_estimator)
for param_search_estimator in parameter_search_estimators
]
)
def _log_parameter_search_results_as_artifact(cv_results_df, run_id):
"""
Records a collection of parameter search results as an MLflow artifact
for the specified run.
:param cv_results_df: A Pandas DataFrame containing the results of a parameter search
training session, which may be obtained by parsing the `cv_results_`
attribute of a trained parameter search estimator such as
`GridSearchCV`.
:param run_id: The ID of the MLflow Run to which the artifact should be recorded.
"""
with TempDir() as t:
results_path = t.path("cv_results.csv")
cv_results_df.to_csv(results_path, index=False)
try_mlflow_log(MlflowClient().log_artifact, run_id, results_path)
def _create_child_runs_for_parameter_search(cv_estimator, parent_run, child_tags=None):
"""
Creates a collection of child runs for a parameter search training session.
Runs are reconstructed from the `cv_results_` attribute of the specified trained
parameter search estimator - `cv_estimator`, which provides relevant performance
metrics for each point in the parameter search space. One child run is created
for each point in the parameter search space. For additional information, see
`https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html`_. # noqa: E501
:param cv_estimator: The trained parameter search estimator for which to create
child runs.
:param parent_run: A py:class:`mlflow.entities.Run` object referring to the parent
parameter search run for which child runs should be created.
:param child_tags: An optional dictionary of MLflow tag keys and values to log
for each child run.
"""
import pandas as pd
client = MlflowClient()
# Use the start time of the parent parameter search run as a rough estimate for the
# start time of child runs, since we cannot precisely determine when each point
# in the parameter search space was explored
child_run_start_time = parent_run.info.start_time
child_run_end_time = int(time.time() * 1000)
seed_estimator = cv_estimator.estimator
# In the unlikely case that a seed of a parameter search estimator is,
# itself, a parameter search estimator, we should avoid logging the untuned
    # parameters of the seed's seed estimator
should_log_params_deeply = not _is_parameter_search_estimator(seed_estimator)
# Each row of `cv_results_` only provides parameters that vary across
# the user-specified parameter grid. In order to log the complete set
# of parameters for each child run, we fetch the parameters defined by
# the seed estimator and update them with parameter subset specified
# in the result row
base_params = seed_estimator.get_params(deep=should_log_params_deeply)
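    # Sketch of the per-row merge described above (illustrative only; `row` stands for one
    # record of `cv_results_`): start from the seed estimator's full parameter set and
    # overlay the subset that this grid point actually varied.
    #
    #     params = dict(base_params)
    #     params.update({k[len("param_"):]: v
    #                    for k, v in row.items() if k.startswith("param_")})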
cv_results_df = | pd.DataFrame.from_dict(cv_estimator.cv_results_) | pandas.DataFrame.from_dict |
'''GDELTeda.py
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
Class for collecting Pymongo and Pandas operations to automate EDA on
subsets of GDELT records (Events/Mentions, GKG, or joins).
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations. If those directories are not
already present, a fallback method for string-literal directory reorientation
may be found in '__init__()' at this tag: # A02b - Project directory path.
Specification for any given user's main project directory should be made for
that os.chdir() call.
See also GDELTbase.py, tag # A01a - backup path specification, as any given
user's project directory must be specified there, also.
Contents:
A00 - GDELTeda
A01 - shared class data
A02 - __init__ with instanced data
A02a - Project directory maintenance
A02b - Project directory path specification
Note: Specification at A02b should be changed to suit a user's desired
directory structure, given their local filesystem.
B00 - class methods
B01 - batchEDA()
B02 - eventsBatchEDA()
B03 - mentionsBatchEDA()
B04 - gkgBatchEDA()
Note: see GDELTedaGKGhelpers.py for helper function code & docs
B05 - realtimeEDA()
B06 - loopEDA()
C00 - main w/ testing
C01 - previously-run GDELT realtime EDA testing
'''
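# Example use (hypothetical; assumes GDELTbase is already configured and the project
# directory layout described above exists):
#
#   gEda = GDELTeda(tableList=['events'])
#   gEda.batchEDA(tableList=['events'])   # batch EDA over locally stored Events records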
import json
import multiprocessing
import numpy as np
import os
import pandas as pd
import pymongo
import shutil
import wget
from datetime import datetime, timedelta, timezone
from GDELTbase import GDELTbase
from GDELTedaGKGhelpers import GDELTedaGKGhelpers
from pandas_profiling import ProfileReport
from pprint import pprint as pp
from time import time, sleep
from urllib.error import HTTPError
from zipfile import ZipFile as zf
# A00
class GDELTeda:
'''Collects Pymongo and Pandas operations for querying GDELT records
subsets and performing semi-automated EDA.
Shared class data:
-----------------
logPath - dict
Various os.path objects for EDA log storage.
configFilePaths - dict
Various os.path objects for pandas_profiling.ProfileReport
configuration files, copied to EDA log storage directories upon
__init__, for use in report generation.
Instanced class data:
--------------------
gBase - GDELTbase instance
Used for class member functions, essential for realtimeEDA().
Class methods:
-------------
batchEDA()
eventsBatchEDA()
mentionsBatchEDA()
gkgBatchEDA()
realtimeEDA()
loopEDA()
Helper functions from GDELTedaGKGhelpers.py used in gkgBatchEDA():
pullMainGKGcolumns()
applyDtypes()
convertDatetimes()
convertGKGV15Tone()
mainReport()
locationsReport()
countsReport()
themesReport()
personsReport()
organizationsReport()
'''
# A01 - shared class data
# These paths are set relative to the location of this script, one directory
# up and in 'EDAlogs' parallel to the script directory, which can be named
# arbitrarily.
logPath = {}
logPath['base'] = os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'EDAlogs')
logPath['events'] = {}
logPath['events'] = {
'table' : os.path.join(logPath['base'], 'events'),
'batch' : os.path.join(logPath['base'], 'events', 'batch'),
'realtime' : os.path.join(logPath['base'], 'events', 'realtime'),
}
logPath['mentions'] = {
'table' : os.path.join(logPath['base'], 'mentions'),
'batch' : os.path.join(logPath['base'], 'mentions', 'batch'),
'realtime' : os.path.join(logPath['base'], 'mentions', 'realtime'),
}
logPath['gkg'] = {
'table' : os.path.join(logPath['base'], 'gkg'),
'batch' : os.path.join(logPath['base'], 'gkg', 'batch'),
'realtime' : os.path.join(logPath['base'], 'gkg', 'realtime'),
}
# Turns out, the following isn't the greatest way of keeping track
# of each configuration file. It's easiest to just leave them in the
# exact directories where ProfileReport.to_html() is aimed (via
# os.chdir()), since it's pesky maneuvering outside parameters into
# multiprocessing Pool.map() calls.
# Still, these can and are used in realtimeEDA(), since the size of
# just the most recent datafiles should permit handling them without
# regard for Pandas DataFrame RAM impact (it's greedy, easiest method
# for mitigation is multiprocessing threads, that shouldn't be
# necessary for realtimeEDA()).
# Regardless, all these entries are for copying ProfileReport config
# files to their appropriate directories for use, given base-copies
# present in the 'scripts' directory. Those base copies may be edited
# in 'scripts', since each file will be copied from there.
configFilePaths = {}
configFilePaths['events'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_realtime.yaml"),
}
configFilePaths['mentions'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_realtime.yaml"),
}
configFilePaths['gkg'] = {}
configFilePaths['gkg']['batch'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_batch.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_batch.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_batch.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_batch.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_batch.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_batch.yaml"),
}
configFilePaths['gkg']['realtime'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_realtime.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_realtime.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_realtime.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_realtime.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_realtime.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_realtime.yaml"),
}
# A02
def __init__(self, tableList = ['events', 'mentions', 'gkg']):
'''GDELTeda class initialization, takes a list of GDELT tables to
perform EDA on. Instantiates a GDELTbase() instance for use by class
methods and checks for presence of EDAlogs directories, creating them if
they aren't present, and copying all ProfileReport-required config files
to their applicable directories.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Controls detection and creation of .../EDALogs/... subdirectories for
collection of Pandas Profiling ProfileReport HTML EDA document output.
Also controls permission for class member functions to perform
operations on tables specified by those functions' tableList parameters
as a failsafe against a lack of project directories required for those
operations, specifically output of HTML EDA documents.
output:
------
Produces exhaustive EDA for GDELT record subsets for specified tables
through Pandas Profiling ProfileReport-output HTML documents.
All procedurally automated steps towards report generation are shown
in console output during script execution.
'''
# instancing tables for operations to be passed to member functions
self.tableList = tableList
print("Instantiating GDELTeda...\n")
self.gBase = GDELTbase()
if 'events' not in tableList and \
'mentions' not in tableList and \
'gkg' not in tableList:
print("Error! 'tableList' values do not include a valid GDELT table.",
"\nPlease use one or more of 'events', 'mentions', and/or 'gkg'.")
# instancing trackers for realtimeEDA() and loopEDA()
self.realtimeStarted = False
self.realtimeLooping = False
self.realtimeWindow = 0
self.lastRealDatetime = ''
self.nextRealDatetime = ''
# A02a - Project EDA log directories confirmation and/or creation, and
# Pandas Profiling ProfileReport configuration file copying from 'scripts'
# directory.
print(" Checking log directory...")
if not os.path.isdir(self.logPath['base']):
print(" Doesn't exist! Making...")
# A02b - Project directory path
# For obvious reasons, any user of this script should change this
# string to suit their needs. The directory described with this string
# should be one directory above the location of the 'scripts' directory
# this file should be in. If this file is not in 'scripts', unpredictable
# behavior may occur, and no guarantees of functionality are intended for
# such a state.
os.chdir('C:\\Users\\urf\\Projects\\WGU capstone')
os.mkdir(self.logPath['base'])
for table in tableList:
# switch to EDAlogs directory
os.chdir(self.logPath['base'])
# Branch: table subdirectories not found, create all
if not os.path.isdir(self.logPath[table]['table']):
print("Did not find .../EDAlogs/", table, "...")
print(" Creating .../EDAlogs/", table, "...")
os.mkdir(self.logPath[table]['table'])
os.chdir(self.logPath[table]['table'])
print(" Creating .../EDAlogs/", table, "/batch")
os.mkdir(self.logPath[table]['batch'])
print(" Creating .../EDAlogs/", table, "/realtime")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Branch: table subdirectories found, create batch/realtime directories
# if not present.
else:
print(" Found .../EDAlogs/", table,"...")
os.chdir(self.logPath[table]['table'])
if not os.path.isdir(self.logPath[table]['batch']):
print(" Did not find .../EDAlogs/", table, "/batch , creating...")
os.mkdir(self.logPath[table]['batch'])
if not os.path.isdir(self.logPath[table]['realtime']):
print(" Did not find .../EDAlogs/", table, "/realtime , creating...")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Copying pandas_profiling.ProfileReport configuration files
print(" Copying configuration files...\n")
if table == 'gkg':
# There's a lot of these, but full normalization of GKG is
# prohibitively RAM-expensive, so reports need to be generated for
# both the main columns and the main columns normalized for each
# variable-length subfield.
shutil.copy(self.configFilePaths[table]['realtime']['main'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['locations'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['counts'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['themes'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['persons'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['organizations'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['main'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['locations'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['counts'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['themes'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['persons'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['organizations'],
self.logPath[table]['batch'])
else:
shutil.copy(self.configFilePaths[table]['realtime'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch'],
self.logPath[table]['batch'])
# B00 - class methods
# B01
def batchEDA(self, tableList = ['events','mentions','gkg']):
'''Reshapes and re-types GDELT records for generating Pandas
Profiling ProfileReport()-automated, simple EDA reports from Pandas
DataFrames, from MongoDB-query-cursors.
WARNING: extremely RAM, disk I/O, and processing intensive. Be aware of
what resources are available for these operations at runtime.
Relies on Python multiprocessing.Pool.map() calls against class member
functions eventsBatchEDA() and mentionsBatchEDA(), and a regular call on
gkgBatchEDA(), which uses multiprocessing.Pool.map() calls within it.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting analysis to one or more tables.
Output:
------
Displays progress through the function's operations via console output
while producing Pandas Profiling ProfileReport.to_file() html documents
    for each specified table.
'''
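    # Note on the pattern used below: each table's EDA runs inside a single-worker
    # multiprocessing.Pool so that the worker's RAM is released when the Pool is closed,
    # instead of large intermediate DataFrames accumulating in this process. A minimal
    # sketch of the same idea (illustrative only; the function name is a placeholder):
    #
    #     with multiprocessing.Pool(1) as pool:
    #         pool.map(memory_hungry_function, ['batch'])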
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
for table in tableList:
print("\n------------------------------------------------------------\n")
print("Executing batch EDA on GDELT table", table, "records...")
# WARNING: RAM, PROCESSING, and DISK I/O INTENSIVE
# Events and Mentions are both much easier to handle than GKG, so
# they're called in their own collective function threads with
# multiprocessing.Pool(1).map().
if table == 'events':
os.chdir(self.logPath['events']['batch'])
pool = multiprocessing.Pool(1)
        eventsReported = pool.map(GDELTeda.eventsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'mentions':
os.chdir(self.logPath['mentions']['batch'])
pool = multiprocessing.Pool(1)
        mentionsReported = pool.map(GDELTeda.mentionsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'gkg':
# Here's the GKG bottleneck! Future investigation of parallelization
# improvements may yield gains here, as normalization of all subfield
# and variable-length measures is very RAM expensive, given the
# expansion in records required.
# So, current handling of GKG subfield and variable-length measures
# is isolating most operations in their own process threads within
# gkgBatchEDA() execution, forcing deallocation of those resources upon
# each Pool.close(), as with Events and Mentions table operations above
# which themselves do not require any additional subfield handling.
os.chdir(self.logPath['gkg']['batch'])
self.gkgBatchEDA()
# B02
def eventsBatchEDA(mode):
'''Performs automatic EDA on GDELT Events record subsets. See
function batchEDA() for "if table == 'events':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Events records up to at least the size of the batch EDA test subset used
in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter is included to meet Python multiprocessing.Pool.map()
function requirements. As such, it is present only to receive a
    parameter determined by map(); one iteration of the function executes per
    element passed by map().
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
columnNames = [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
]
columnTypes = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat': pd.StringDtype(),
'Actor1Geo_Long': | pd.StringDtype() | pandas.StringDtype |
"""The pandas client implementation."""
from __future__ import absolute_import
import re
from functools import partial
import dateutil.parser
import numpy as np
import pandas as pd
import pytz
import toolz
from multipledispatch import Dispatcher
from pandas.api.types import CategoricalDtype, DatetimeTZDtype
from pkg_resources import parse_version
import ibis.client as client
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.schema as sch
import ibis.expr.types as ir
from .core import execute_and_reset
infer_pandas_dtype = pd.api.types.infer_dtype
_ibis_dtypes = toolz.valmap(
np.dtype,
{
dt.Boolean: np.bool_,
dt.Null: np.object_,
dt.Array: np.object_,
dt.String: np.object_,
dt.Binary: np.object_,
dt.Date: 'datetime64[ns]',
dt.Time: 'timedelta64[ns]',
dt.Timestamp: 'datetime64[ns]',
dt.Int8: np.int8,
dt.Int16: np.int16,
dt.Int32: np.int32,
dt.Int64: np.int64,
dt.UInt8: np.uint8,
dt.UInt16: np.uint16,
dt.UInt32: np.uint32,
dt.UInt64: np.uint64,
dt.Float32: np.float32,
dt.Float64: np.float64,
dt.Decimal: np.object_,
dt.Struct: np.object_,
},
)
_numpy_dtypes = toolz.keymap(
np.dtype,
{
'bool': dt.boolean,
'int8': dt.int8,
'int16': dt.int16,
'int32': dt.int32,
'int64': dt.int64,
'uint8': dt.uint8,
'uint16': dt.uint16,
'uint32': dt.uint32,
'uint64': dt.uint64,
'float16': dt.float16,
'float32': dt.float32,
'float64': dt.float64,
'double': dt.double,
'unicode': dt.string,
'str': dt.string,
'datetime64': dt.timestamp,
'datetime64[ns]': dt.timestamp,
'timedelta64': dt.interval,
'timedelta64[ns]': dt.Interval('ns'),
},
)
_inferable_pandas_dtypes = {
'string': dt.string,
'bytes': dt.string,
'floating': dt.float64,
'integer': dt.int64,
'mixed-integer': dt.binary,
'mixed-integer-float': dt.float64,
'decimal': dt.binary,
'complex': dt.binary,
'categorical': dt.binary,
'boolean': dt.boolean,
'datetime64': dt.timestamp,
'datetime': dt.timestamp,
'date': dt.date,
'timedelta64': dt.interval,
'timedelta': dt.interval,
'time': dt.time,
'period': dt.binary,
'mixed': dt.binary,
'empty': dt.binary,
'unicode': dt.string,
}
@dt.dtype.register(np.dtype)
def from_numpy_dtype(value):
try:
return _numpy_dtypes[value]
except KeyError:
raise TypeError(
'numpy dtype {!r} is not supported in the pandas backend'.format(
value
)
)
@dt.dtype.register(DatetimeTZDtype)
def from_pandas_tzdtype(value):
return dt.Timestamp(timezone=str(value.tz))
@dt.dtype.register(CategoricalDtype)
def from_pandas_categorical(value):
return dt.Category()
@dt.infer.register(
(np.generic,)
+ tuple(
frozenset(
np.signedinteger.__subclasses__()
+ np.unsignedinteger.__subclasses__() # np.int64, np.uint64, etc.
)
) # we need this because in Python 2 int is a parent of np.integer
)
def infer_numpy_scalar(value):
return dt.dtype(value.dtype)
def _infer_pandas_series_contents(s: pd.Series) -> dt.DataType:
"""Infer the type of the **contents** of a pd.Series.
No dispatch for this because there is no class representing "the contents
of a Series". Instead, this is meant to be used internally, mainly by
`infer_pandas_series`.
Parameters
----------
s : pd.Series
The Series whose contents we want to know the type of
Returns
-------
dtype : dt.DataType
The dtype of the contents of the Series
"""
if s.dtype == np.object_:
inferred_dtype = infer_pandas_dtype(s, skipna=True)
if inferred_dtype in {'mixed', 'decimal'}:
# We need to inspect an element to determine the Ibis dtype
value = s.iloc[0]
if isinstance(value, (np.ndarray, list, pd.Series)):
# Defer to individual `infer` functions for these
return dt.infer(value)
else:
return dt.dtype('binary')
else:
return _inferable_pandas_dtypes[inferred_dtype]
else:
return dt.dtype(s.dtype)
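# Illustrative behaviour (a sketch, not part of the module's test suite):
#
#     _infer_pandas_series_contents(pd.Series([1, 2, 3]))    # -> dt.int64
#     _infer_pandas_series_contents(pd.Series(['a', 'b']))   # -> dt.string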
@dt.infer.register(pd.Series)
def infer_pandas_series(s):
"""Infer the type of a pd.Series.
Note that the returned datatype will be an array type, which corresponds
to the fact that a Series is a collection of elements. Please use
`_infer_pandas_series_contents` if you are interested in the datatype
of the **contents** of the Series.
"""
return dt.Array(_infer_pandas_series_contents(s))
@dt.infer.register(pd.Timestamp)
def infer_pandas_timestamp(value):
if value.tz is not None:
return dt.Timestamp(timezone=str(value.tz))
else:
return dt.timestamp
@dt.infer.register(np.ndarray)
def infer_array(value):
# In this function, by default we'll directly map the dtype of the
# np.array to a corresponding Ibis dtype (see bottom)
np_dtype = value.dtype
# However, there are some special cases where we can't use the np.array's
# dtype:
if np_dtype.type == np.object_:
# np.array dtype is `dtype('O')`, which is ambiguous.
inferred_dtype = infer_pandas_dtype(value, skipna=True)
return dt.Array(_inferable_pandas_dtypes[inferred_dtype])
elif np_dtype.type == np.str_:
# np.array dtype is `dtype('<U1')` (for np.arrays containing strings),
# which is ambiguous.
return dt.Array(dt.string)
# The dtype of the np.array is not ambiguous, and can be used directly.
return dt.Array(dt.dtype(np_dtype))
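# Illustrative behaviour (a sketch; the dtype names follow the mappings defined above):
#
#     infer_array(np.array([1, 2], dtype='int64'))      # -> Array(int64)
#     infer_array(np.array(['a', 'b'], dtype=object))   # -> Array(string)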
@sch.schema.register(pd.Series)
def schema_from_series(s):
return sch.schema(tuple(s.iteritems()))
@sch.infer.register(pd.DataFrame)
def infer_pandas_schema(df, schema=None):
schema = schema if schema is not None else {}
pairs = []
for column_name, pandas_dtype in df.dtypes.iteritems():
if not isinstance(column_name, str):
raise TypeError(
'Column names must be strings to use the pandas backend'
)
if column_name in schema:
ibis_dtype = dt.dtype(schema[column_name])
else:
ibis_dtype = _infer_pandas_series_contents(df[column_name])
pairs.append((column_name, ibis_dtype))
return sch.schema(pairs)
def ibis_dtype_to_pandas(ibis_dtype):
"""Convert ibis dtype to the pandas / numpy alternative"""
assert isinstance(ibis_dtype, dt.DataType)
if isinstance(ibis_dtype, dt.Timestamp) and ibis_dtype.timezone:
return | DatetimeTZDtype('ns', ibis_dtype.timezone) | pandas.api.types.DatetimeTZDtype |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import pytest
import numpy as np
import vineyard
from vineyard.core import default_builder_context, default_resolver_context
from vineyard.data import register_builtin_types
register_builtin_types(default_builder_context, default_resolver_context)
def test_pandas_dataframe(vineyard_client):
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]})
object_id = vineyard_client.put(df)
pd.testing.assert_frame_equal(df, vineyard_client.get(object_id))
def test_pandas_dataframe_int_columns(vineyard_client):
df = pd.DataFrame({1: [1, 2, 3, 4], 2: [5, 6, 7, 8]})
object_id = vineyard_client.put(df)
pd.testing.assert_frame_equal(df, vineyard_client.get(object_id))
def test_pandas_dataframe_mixed_columns(vineyard_client):
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8], 1: [9, 10, 11, 12], 2: [13, 14, 15, 16]})
object_id = vineyard_client.put(df)
pd.testing.assert_frame_equal(df, vineyard_client.get(object_id))
def test_dataframe_reindex(vineyard_client):
df = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
expected = df.reindex(index=np.arange(10, 1, -1))
object_id = vineyard_client.put(expected)
pd.testing.assert_frame_equal(expected, vineyard_client.get(object_id))
def test_dataframe_set_index(vineyard_client):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
expected = df1.set_index('y', drop=True)
object_id = vineyard_client.put(expected)
pd.testing.assert_frame_equal(expected, vineyard_client.get(object_id))
def test_sparse_array(vineyard_client):
arr = np.random.randn(10)
arr[2:5] = np.nan
arr[7:8] = np.nan
sparr = pd.arrays.SparseArray(arr)
object_id = vineyard_client.put(sparr)
pd.testing.assert_extension_array_equal(sparr, vineyard_client.get(object_id))
def test_dataframe_with_sparse_array(vineyard_client):
df = pd.DataFrame(np.random.randn(100, 4), columns=['x', 'y', 'z', 'a'])
df.iloc[:98] = np.nan
sdf = df.astype( | pd.SparseDtype("float", np.nan) | pandas.SparseDtype |
from typing import Dict, Any, Optional
import os
import json
CONFIG_LOCATION = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, "data", "path_config.json"))
with open(CONFIG_LOCATION) as _json_file:
CONFIG = json.load(_json_file)
os.environ["OMP_NUM_THREADS"] = "8"
import copy
import numpy as np
import pandas as pd
import argparse
import logging
import pickle
logger = logging.getLogger("s2and")
from tqdm import tqdm
os.environ["S2AND_CACHE"] = os.path.join(CONFIG["main_data_dir"], ".feature_cache")
from sklearn.cluster import DBSCAN
from s2and.data import ANDData
from s2and.featurizer import featurize, FeaturizationInfo
from s2and.model import PairwiseModeler, Clusterer, FastCluster
from s2and.eval import pairwise_eval, cluster_eval
from s2and.consts import FEATURIZER_VERSION, DEFAULT_CHUNK_SIZE, NAME_COUNTS_PATH
from s2and.file_cache import cached_path
from hyperopt import hp
search_space = {
"eps": hp.uniform("choice", 0, 1),
}
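# Note: 'eps' is the only clustering hyperparameter tuned here; hyperopt samples it
# uniformly from [0, 1] and the Clusterer uses it as the distance threshold for the chosen
# clustering backend (FastCluster agglomerative linkage or DBSCAN below). This reading of
# 'eps' is an interpretation of the code that follows, not taken from S2AND documentation.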
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
DATA_DIR = CONFIG["main_data_dir"]
FEATURES_TO_USE = [
"name_similarity",
"affiliation_similarity",
"email_similarity",
"coauthor_similarity",
"venue_similarity",
"year_diff",
"title_similarity",
"reference_features",
"misc_features",
"name_counts",
"embedding_similarity",
"journal_similarity",
"advanced_name_similarity",
]
NAMELESS_FEATURES_TO_USE = [
feature_name
for feature_name in FEATURES_TO_USE
if feature_name not in {"name_similarity", "advanced_name_similarity", "name_counts"}
]
FEATURIZER_INFO = FeaturizationInfo(features_to_use=FEATURES_TO_USE, featurizer_version=FEATURIZER_VERSION)
NAMELESS_FEATURIZER_INFO = FeaturizationInfo(
features_to_use=NAMELESS_FEATURES_TO_USE, featurizer_version=FEATURIZER_VERSION
)
PAIRWISE_ONLY_DATASETS = {"medline"}
BLOCK_TYPE = "original"
N_TRAIN_PAIRS_SIZE = 100000
N_VAL_TEST_SIZE = 10000
N_ITER = 25
USE_CACHE = True
PREPROCESS = True
def sota_helper(dataset, experiment_name, random_seed, use_s2_clusters=False):
dataset_name = dataset["name"]
pairwise_metrics = pairwise_eval(
dataset["X_test"],
dataset["y_test"],
dataset["pairwise_modeler"],
os.path.join(DATA_DIR, "experiments", experiment_name, "sota", f"seed_{random_seed}", "figs"),
f"{dataset_name}",
FEATURIZER_INFO.get_feature_names(),
nameless_classifier=dataset["nameless_pairwise_modeler"],
nameless_X=dataset["nameless_X_test"],
nameless_feature_names=NAMELESS_FEATURIZER_INFO.get_feature_names(),
)
if dataset_name not in PAIRWISE_ONLY_DATASETS:
cluster_metrics, b3_metrics_per_signature = cluster_eval(
dataset["anddata"],
dataset["clusterer"],
split="test",
use_s2_clusters=use_s2_clusters,
)
else:
cluster_metrics = {
"B3 (P, R, F1)": (None, None, None),
"Cluster (P, R F1)": (None, None, None),
"Cluster Macro (P, R, F1)": (None, None, None),
"Pred bigger ratio (mean, count)": (None, None),
"True bigger ratio (mean, count)": (None, None),
}
b3_metrics_per_signature = None
metrics = {"pairwise": pairwise_metrics, "cluster": cluster_metrics}
logger.info(f"{dataset_name}_sota_: {metrics}")
if not os.path.exists(
os.path.join(DATA_DIR, "experiments", experiment_name, "sota", f"seed_{random_seed}", "metrics")
):
os.makedirs(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
)
)
with open(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
f"{dataset_name}_sota.json",
),
"w",
) as _json_file:
json.dump(metrics, _json_file, indent=4)
return pairwise_metrics, cluster_metrics, b3_metrics_per_signature
def main(
experiment_name: str,
dont_use_nameless_model: bool,
n_jobs: int,
dont_use_monotone_constraints: bool,
linkage: str,
use_dbscan: bool,
negative_one_for_nan: bool,
random_seed: int,
inspire_split: int,
inspire_only: bool,
aminer_only: bool,
):
USE_NAMELESS_MODEL = not dont_use_nameless_model
N_JOBS = n_jobs
USE_MONOTONE_CONSTRAINTS = not dont_use_monotone_constraints
logger.info(
(
f"USE_NAMELESS_MODEL={USE_NAMELESS_MODEL}, "
f"N_JOBS={N_JOBS}, "
f"USE_MONOTONE_CONSTRAINTS={USE_MONOTONE_CONSTRAINTS}, "
f"linkage={linkage}, "
f"use_dbscan={use_dbscan}, "
f"negative_one_for_nan={negative_one_for_nan}, "
f"random_seed={random_seed}"
)
)
if inspire_only:
DATASET_NAMES = ["inspire"]
elif aminer_only:
DATASET_NAMES = ["aminer"]
else:
DATASET_NAMES = [
"kisti",
"pubmed",
"medline",
]
FIXED_BLOCK = ["aminer"]
FIXED_SIGNATURE = ["inspire"]
if negative_one_for_nan:
MONOTONE_CONSTRAINTS = None
NAMELESS_MONOTONE_CONSTRAINTS = None
NAN_VALUE = -1
else:
MONOTONE_CONSTRAINTS = FEATURIZER_INFO.lightgbm_monotone_constraints
NAMELESS_MONOTONE_CONSTRAINTS = NAMELESS_FEATURIZER_INFO.lightgbm_monotone_constraints
NAN_VALUE = np.nan
with open(cached_path(NAME_COUNTS_PATH), "rb") as f:
(
first_dict,
last_dict,
first_last_dict,
last_first_initial_dict,
) = pickle.load(f)
name_counts = {
"first_dict": first_dict,
"last_dict": last_dict,
"first_last_dict": first_last_dict,
"last_first_initial_dict": last_first_initial_dict,
}
logger.info("loaded name counts")
datasets: Dict[str, Any] = {}
for dataset_name in tqdm(DATASET_NAMES, desc="Processing datasets and fitting base models"):
logger.info("")
logger.info(f"processing dataset {dataset_name}")
clusters_path: Optional[str] = None
train_blocks: Optional[str] = None
val_blocks: Optional[str] = None
test_blocks: Optional[str] = None
train_pairs_path: Optional[str] = None
val_pairs_path: Optional[str] = None
test_pairs_path: Optional[str] = None
train_signatures: Optional[str] = None
val_signatures: Optional[str] = None
test_signatures: Optional[str] = None
if dataset_name in FIXED_BLOCK:
logger.info("FIXED BLOCK")
train_blocks_fname: str = "train_keys.json"
val_blocks_fname: str = "val_keys.json"
test_blocks_fname: str = "test_keys.json"
logger.info(f"File names, FIXED BLOCK {train_blocks_fname, val_blocks_fname, test_blocks_fname}")
clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + "_clusters.json")
train_blocks = os.path.join(DATA_DIR, dataset_name, train_blocks_fname)
if not os.path.exists(os.path.join(DATA_DIR, dataset_name, val_blocks_fname)):
val_blocks = None
test_blocks = os.path.join(DATA_DIR, dataset_name, test_blocks_fname)
elif dataset_name in FIXED_SIGNATURE:
train_sign_fname: str = "train_keys_" + str(inspire_split) + ".json"
val_sign_fname: str = "val_keys_" + str(inspire_split) + ".json"
test_sign_fname: str = "test_keys_" + str(inspire_split) + ".json"
logger.info(f"File names, FIXED_SIGNATURE {train_sign_fname, val_sign_fname, test_sign_fname}")
clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + "_clusters.json")
train_signatures = os.path.join(DATA_DIR, dataset_name, train_sign_fname)
if not os.path.exists(os.path.join(DATA_DIR, dataset_name, val_sign_fname)):
val_signatures = None
test_signatures = os.path.join(DATA_DIR, dataset_name, test_sign_fname)
elif dataset_name not in PAIRWISE_ONLY_DATASETS:
logger.info("CLUSTER with random split")
clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + "_clusters.json")
else:
logger.info("Pairwise model")
train_pairs_path = os.path.join(DATA_DIR, dataset_name, "train_pairs.csv")
val_pairs_path = os.path.join(DATA_DIR, dataset_name, "val_pairs.csv")
if not os.path.exists(val_pairs_path):
val_pairs_path = None
test_pairs_path = os.path.join(DATA_DIR, dataset_name, "test_pairs.csv")
logger.info(f"loading dataset {dataset_name}")
if dataset_name == "inspire" or dataset_name == "kisti":
unit_of_data_split = "signatures"
else:
unit_of_data_split = "blocks"
if dataset_name == "kisti":
train_ratio = 0.4
val_ratio = 0.1
test_ratio = 0.5
else:
train_ratio = 0.8
val_ratio = 0.1
test_ratio = 0.1
logger.info(f"ratios {train_ratio, val_ratio, test_ratio}")
logger.info(f"block keys {train_blocks, val_blocks, test_blocks}")
logger.info(f"signature keys {train_signatures, val_signatures, test_signatures}")
anddata = ANDData(
signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + "_signatures.json"),
papers=os.path.join(DATA_DIR, dataset_name, dataset_name + "_papers.json"),
name=dataset_name,
mode="train",
specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + "_specter.pickle"),
clusters=clusters_path,
block_type=BLOCK_TYPE,
train_pairs=train_pairs_path,
val_pairs=val_pairs_path,
test_pairs=test_pairs_path,
train_pairs_size=N_TRAIN_PAIRS_SIZE,
val_pairs_size=N_VAL_TEST_SIZE,
test_pairs_size=N_VAL_TEST_SIZE,
n_jobs=N_JOBS,
load_name_counts=name_counts,
preprocess=PREPROCESS,
random_seed=random_seed,
train_blocks=train_blocks,
val_blocks=val_blocks,
test_blocks=test_blocks,
train_signatures=train_signatures,
val_signatures=val_signatures,
test_signatures=test_signatures,
train_ratio=train_ratio,
val_ratio=val_ratio,
test_ratio=test_ratio,
unit_of_data_split=unit_of_data_split,
)
logger.info(f"dataset {dataset_name} loaded")
logger.info(f"featurizing {dataset_name}")
train, val, test = featurize(anddata, FEATURIZER_INFO, n_jobs=N_JOBS, use_cache=USE_CACHE, chunk_size=DEFAULT_CHUNK_SIZE, nameless_featurizer_info=NAMELESS_FEATURIZER_INFO, nan_value=NAN_VALUE) # type: ignore
X_train, y_train, nameless_X_train = train
X_val, y_val, nameless_X_val = val
assert test is not None
X_test, y_test, nameless_X_test = test
logger.info(f"dataset {dataset_name} featurized")
pairwise_modeler: Optional[PairwiseModeler] = None
nameless_pairwise_modeler = None
cluster: Optional[Clusterer] = None
logger.info(f"fitting pairwise for {dataset_name}")
pairwise_modeler = PairwiseModeler(
n_iter=N_ITER,
monotone_constraints=MONOTONE_CONSTRAINTS if USE_MONOTONE_CONSTRAINTS else None,
random_state=random_seed,
)
pairwise_modeler.fit(X_train, y_train, X_val, y_val)
logger.info(f"pairwise fit for {dataset_name}")
if USE_NAMELESS_MODEL:
logger.info(f"nameless fitting pairwise for {dataset_name}")
nameless_pairwise_modeler = PairwiseModeler(
n_iter=N_ITER,
monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS if USE_MONOTONE_CONSTRAINTS else None,
random_state=random_seed,
)
nameless_pairwise_modeler.fit(nameless_X_train, y_train, nameless_X_val, y_val)
logger.info(f"nameless pairwise fit for {dataset_name}")
distances_for_sparsity = [1 - pred[1] for pred in pairwise_modeler.predict_proba(X_train)]
threshold = np.percentile(distances_for_sparsity, [10, 20, 30, 40, 50, 60, 70, 80, 90])
logger.info(f"Thresholds {threshold}")
if dataset_name not in PAIRWISE_ONLY_DATASETS:
logger.info(f"fitting clusterer for {dataset_name}")
cluster = Clusterer(
FEATURIZER_INFO,
pairwise_modeler.classifier,
cluster_model=FastCluster(linkage=linkage)
if not use_dbscan
else DBSCAN(min_samples=1, metric="precomputed"),
search_space=search_space,
n_jobs=N_JOBS,
use_cache=USE_CACHE,
nameless_classifier=nameless_pairwise_modeler.classifier
if nameless_pairwise_modeler is not None
else None,
nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,
random_state=random_seed,
use_default_constraints_as_supervision=False,
)
cluster.fit(anddata)
logger.info(f"clusterer fit for {dataset_name}")
logger.info(f"{dataset_name} best clustering parameters: " + str(cluster.best_params))
dataset: Dict[str, Any] = {}
dataset["anddata"] = anddata
dataset["X_train"] = X_train
dataset["y_train"] = y_train
dataset["X_val"] = X_val
dataset["y_val"] = y_val
dataset["X_test"] = X_test
dataset["y_test"] = y_test
dataset["pairwise_modeler"] = pairwise_modeler
dataset["nameless_X_train"] = nameless_X_train
dataset["nameless_X_val"] = nameless_X_val
dataset["nameless_X_test"] = nameless_X_test
dataset["nameless_pairwise_modeler"] = nameless_pairwise_modeler
dataset["clusterer"] = cluster
dataset["name"] = anddata.name
datasets[dataset_name] = dataset
logger.info("")
logger.info("making evaluation grids")
b3_f1_grid = [["" for j in range(len(DATASET_NAMES) + 1)] for i in range(len(DATASET_NAMES) + 1)]
for i in range(max(len(DATASET_NAMES), len(DATASET_NAMES))):
if i < len(DATASET_NAMES):
b3_f1_grid[0][i + 1] = DATASET_NAMES[i]
if i < len(DATASET_NAMES):
b3_f1_grid[i + 1][0] = DATASET_NAMES[i]
pairwise_auroc_grid = copy.deepcopy(b3_f1_grid) # makes a copy of the grid
pairwise_f1_classification_grid = copy.deepcopy(b3_f1_grid) # makes a copy of the grid
pairwise_average_precisision_grid = copy.deepcopy(b3_f1_grid) # makes a copy of the grid
pairwise_macro_f1_grid = copy.deepcopy(b3_f1_grid) # makes a copy of the grid
# transfer of individual models
logger.info("starting individual model evaluation")
for _, source_dataset in tqdm(datasets.items(), desc="Evaluating individual models"):
logger.info("")
logger.info(f"evaluating source {source_dataset['name']} target {source_dataset['name']}")
pairwise_metrics, cluster_metrics, _ = sota_helper(source_dataset, experiment_name, random_seed)
b3_f1_grid[DATASET_NAMES.index(source_dataset["name"]) + 1][
DATASET_NAMES.index(source_dataset["name"]) + 1
] = cluster_metrics["B3 (P, R, F1)"][2]
pairwise_macro_f1_grid[DATASET_NAMES.index(source_dataset["name"]) + 1][
DATASET_NAMES.index(source_dataset["name"]) + 1
] = cluster_metrics["Cluster Macro (P, R, F1)"][2]
pairwise_auroc_grid[DATASET_NAMES.index(source_dataset["name"]) + 1][
DATASET_NAMES.index(source_dataset["name"]) + 1
] = pairwise_metrics["AUROC"]
pairwise_f1_classification_grid[DATASET_NAMES.index(source_dataset["name"]) + 1][
DATASET_NAMES.index(source_dataset["name"]) + 1
] = pairwise_metrics["F1"]
pairwise_average_precisision_grid[DATASET_NAMES.index(source_dataset["name"]) + 1][
DATASET_NAMES.index(source_dataset["name"]) + 1
] = pairwise_metrics["Average Precision"]
logger.info("finished individual model evaluation")
# union
logger.info("")
logger.info("writing results to disk")
print("B3 F1:")
b3_df = pd.DataFrame(b3_f1_grid)
print(b3_df)
print()
print("Pairwise Macro F1 (cluster):")
pairwise_macro_f1_df = pd.DataFrame(pairwise_macro_f1_grid)
print(pairwise_macro_f1_df)
print()
print("Pairwise AUROC:")
pairwise_df = pd.DataFrame(pairwise_auroc_grid)
print(pairwise_df)
print()
print("Pairwise classification F1:")
pairwise_classification_f1_df = pd.DataFrame(pairwise_f1_classification_grid)
print(pairwise_classification_f1_df)
print()
print("Pairwise AP:")
pairwise_ap_df = pd.DataFrame(pairwise_average_precisision_grid)
print(pairwise_ap_df)
print()
with open(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
"full_grid.json",
),
"w",
) as _json_file:
json.dump(
{
"b3": b3_f1_grid,
"pairwisef1": pairwise_macro_f1_grid,
"auroc": pairwise_auroc_grid,
"classificationf1": pairwise_f1_classification_grid,
"averageprecision": pairwise_average_precisision_grid,
},
_json_file,
)
b3_df.to_csv(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
"b3.csv",
),
index=False,
)
pairwise_macro_f1_df.to_csv(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
"pair_macro_f1_cluster.csv",
),
index=False,
)
pairwise_df.to_csv(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
"pairwise_auc.csv",
),
index=False,
)
pairwise_classification_f1_df.to_csv(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
"classification_f1.csv",
),
index=False,
)
pairwise_ap_df.to_csv(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"sota",
f"seed_{random_seed}",
"metrics",
"average_precision.csv",
),
index=False,
)
return (
b3_f1_grid,
pairwise_macro_f1_grid,
pairwise_auroc_grid,
pairwise_f1_classification_grid,
pairwise_average_precisision_grid,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--experiment_name",
type=str,
help="The name of the experiment (for writing results to disk)",
)
parser.add_argument(
"--dont_use_nameless_model",
action="store_true",
help="Whether to use the nameless model",
)
parser.add_argument("--n_jobs", type=int, default=10, help="How many cpus to use")
parser.add_argument(
"--dont_use_monotone_constraints",
action="store_true",
help="Whether to use monotone constraints",
)
parser.add_argument("--linkage", type=str, default="average", help="What linkage function to use")
parser.add_argument(
"--use_dbscan",
action="store_true",
help="Whether to use DBSCAN instead of fastcluster",
)
parser.add_argument(
"--negative_one_for_nan",
action="store_true",
help="Whether to use -1 as nans and no monotone constraints",
)
parser.add_argument("--random_seed", nargs="+", default=[1], type=int)
parser.add_argument("--inspire_only", action="store_true")
parser.add_argument("--aminer_only", action="store_true")
parser.add_argument("--inspire_split", default=0, help="which split from inspire to evaluate 0/1/2", type=int)
args = parser.parse_args()
logger.info(args)
multi_b3_grid = []
multi_pairwise_macro_f1_grid = []
multi_pairwise_auroc_grid = []
multi_pairwise_class_f1_grid = []
multi_average_precision_grid = []
for seed in args.random_seed:
print("run with seed:", seed)
b3_f1_grid, pairwise_macro_f1, pairwise_auroc_grid, pairwise_class_f1, pairwise_average_precision = main(
args.experiment_name,
args.dont_use_nameless_model,
args.n_jobs,
args.dont_use_monotone_constraints,
args.linkage,
args.use_dbscan,
args.negative_one_for_nan,
int(seed),
args.inspire_split,
args.inspire_only,
args.aminer_only,
)
multi_b3_grid.append(b3_f1_grid)
multi_pairwise_macro_f1_grid.append(pairwise_macro_f1)
multi_pairwise_auroc_grid.append(pairwise_auroc_grid)
multi_pairwise_class_f1_grid.append(pairwise_class_f1)
multi_average_precision_grid.append(pairwise_average_precision)
average_b3_f1_grid = copy.deepcopy(multi_b3_grid[0])
average_pairwise_f1_macro_grid = copy.deepcopy(multi_pairwise_macro_f1_grid[0])
average_pairwise_auroc_grid = copy.deepcopy(multi_pairwise_auroc_grid[0])
average_pairwise_class_f1_grid = copy.deepcopy(multi_pairwise_class_f1_grid[0])
average_class_ap_grid = copy.deepcopy(multi_average_precision_grid[0])
index = -1
for b3, macro_f1, pairwise, class_f1, ap in zip(
multi_b3_grid,
multi_pairwise_macro_f1_grid,
multi_pairwise_auroc_grid,
multi_pairwise_class_f1_grid,
multi_average_precision_grid,
):
index += 1
if index == 0:
continue
for i in range(1, len(average_b3_f1_grid)):
for j in range(1, len(average_b3_f1_grid[0])):
if i == j:
if b3[i][j] is not None:
average_b3_f1_grid[i][j] += b3[i][j]
if macro_f1[i][j] is not None:
average_pairwise_f1_macro_grid[i][j] += macro_f1[i][j]
average_pairwise_auroc_grid[i][j] += pairwise[i][j]
average_pairwise_class_f1_grid[i][j] += class_f1[i][j]
average_class_ap_grid[i][j] += ap[i][j]
for i in range(1, len(average_b3_f1_grid)):
for j in range(1, len(average_b3_f1_grid[0])):
if i == j:
if average_b3_f1_grid[i][j] is not None:
average_b3_f1_grid[i][j] = average_b3_f1_grid[i][j] / len(multi_b3_grid)
if average_pairwise_f1_macro_grid[i][j] is not None:
average_pairwise_f1_macro_grid[i][j] = average_pairwise_f1_macro_grid[i][j] / len(
multi_pairwise_macro_f1_grid
)
average_pairwise_auroc_grid[i][j] = average_pairwise_auroc_grid[i][j] / len(multi_pairwise_auroc_grid)
average_pairwise_class_f1_grid[i][j] = average_pairwise_class_f1_grid[i][j] / len(
multi_pairwise_class_f1_grid
)
average_class_ap_grid[i][j] = average_class_ap_grid[i][j] / len(multi_average_precision_grid)
print("Average B3 F1:")
b3_df = pd.DataFrame(average_b3_f1_grid)
print(b3_df)
print()
print("Average pairwise macro F1:")
pair_macro_f1_df = | pd.DataFrame(average_pairwise_f1_macro_grid) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 18 11:31:56 2021
@author: nguy0936
I increased the number of layers from conv1 to embedding to see if more layers
could result in better performance. I did this for only set 1 - Hallett
"""
# load packages
import pandas as pd
import umap
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import XGBClassifier
from xgboost import cv
from sklearn.metrics import roc_auc_score
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve
##========== load low-high level features
mdir1 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set1\\'
mdir2 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set2\\'
def load_feature(mdir): # function to load data
conv1 = pd.read_csv(mdir + 'result_conv1.csv', header=None) # conv1
conv2 = | pd.read_csv(mdir + 'result_conv2.csv', header=None) | pandas.read_csv |
import sys
import urllib
if 'src' not in sys.path:
# https://stackoverflow.com/questions/10095037/why-use-sys-path-appendpath-instead-of-sys-path-insert1-path
sys.path.insert(1, "src")
import datetime as dt
import importlib.resources as pkg_resources
import os
import pathlib
import shutil
import pandas as pd
from invoke import task
from ecv_analytics import data
from ecv_analytics.scrape_recoveries import time_series_recovered
ROOT_DIR = os.getcwd()
BUILD_DIR = pathlib.Path("build")
WIKIPEDIA_RECOVERED = BUILD_DIR / 'scraping/wikipedia-recovered'
REPO = "https://github.com/obuchel/classification"
def reset_dir():
os.chdir(ROOT_DIR)
@task
def copy_index(c):
"""Copy the top-level index file to the site."""
site = BUILD_DIR / 'site'
shutil.copy("site/index.html", site)
@task(post=[copy_index])
def copy_us_counties(c):
reset_dir()
site = BUILD_DIR / 'site/us-counties'
repo = BUILD_DIR / 'repos/classification'
site.mkdir(parents=True, exist_ok=True)
c.run(f"rsync -a {repo}/output {site}")
files = [
# HTML pages
'classification_map.html', 'classification_map2.html',
# JS files
'map_impl.js', 'map_impl2.js',
# Icons
'green.png', 'orange.png', 'red.png', 'yellow.png',
# geo data
'counties5.json', 'states5.json', ]
for file in files:
shutil.copy(repo / file, site)
shutil.copy("site/us-counties/index.html", site)
@task(post=[copy_us_counties])
def us_counties(c):
reset_dir()
repos = BUILD_DIR / "repos"
repos.mkdir(parents=True, exist_ok=True)
if os.path.exists(repos / "classification"):
os.chdir(repos / "classification")
c.run("git pull")
else:
os.chdir(repos)
c.run("git clone {}".format(REPO))
os.chdir("classification")
print(os.getcwd())
c.run("python prepare_classification_counties_final.py", echo=True)
@task
def scrape_recovered_from_wikipedia(c, restart='US-AL',
output=WIKIPEDIA_RECOVERED):
"""Scrape time series of recovered cases from historic versions of Wikipedia pages.
:param restart Restart scraping at a state, given by its ISO code, e.g. US-VA for Virginia
:param output Write CSV files into this directory, defaults to 'build/scraping/wikipedia-recovered/YYYY-MM-DD_HHMM'
Example: https://en.wikipedia.org/w/index.php?title=COVID-19_pandemic_in_Wisconsin
"""
outdir = pathlib.Path(output)
outdir.mkdir(parents=True, exist_ok=True)
rundir = outdir / dt.datetime.now().strftime("%Y-%m-%d_%H%M")
rundir.mkdir(parents=True, exist_ok=True)
print("Writing output to {}".format(rundir))
states = pd.read_csv(
pkg_resources.open_text(data, "wikipedia_ISO_3166-2_US.csv"))
all_states = []
restart_index = list(states.Iso_3166_2).index(restart)
for index, row in states.iloc[restart_index:].iterrows():
if pd.isna(row['Wikipedia_Name']):
continue
time_series = time_series_recovered(row['Wikipedia_Name'],
name=row['Name'],
iso_code=row['Iso_3166_2'], limit=500)
        filename = 'time_series_recovered_wikipedia_{}.csv'.format(
row['Iso_3166_2'])
time_series.to_csv(rundir / filename)
all_states.append(time_series)
| pd.concat(all_states) | pandas.concat |
import streamlit as st
import json
import ast
import os
import datetime
from new_card_transformation import transform_card
import pandas as pd
import numpy as np
from functions import obtain_unique_values, load_models, predict_dummy_binary, predict_dummy_multiclass, predict_dummy_numeric
from streamlit_player import st_player
import nltk
# Download nltk english stopwords
nltk.download('stopwords')
# Show Magic's logo
st.sidebar.image('./static/Magic-The-Gathering-Logo.png', use_column_width=True)
# Title to display on the App
st.markdown("""
# Magic The Gathering
### Artificial Intelligence Models
Welcome to our **Magic: The Gathering card prediction models** with **Artificial Intelligence**!
*Our vision is to bring AI to Magic and help evolving this amazing game!*
""")
col4, col5 = st.columns(2)
# If they check this button, we show a much detailed description
if col4.checkbox('<<< HOW DOES IT WORK?'):
st.markdown("""
This site serves as a demonstration of many Machine Learning models that are trained using card information from the Scryfall Magic The Gathering API.
**The process goes something like this**: First we get all Magic cards from the Scryfall API, we then transform those cards into data prepared for machine learning,
converting each card into hundreds of different data points. That's when we proceed to *show* that data to different machine learning models along with the target
we want to predict. The model will start learning patterns hidden within the data and improve as we feed it more data, until it's able by itself to provide
accurate predictions.
As of today, we have 3 types of models and use 3 different algorithms:
* **BINARY** models, **MULTICLASS** models and **NUMERIC** models, to define the type of prediction we want to do (a YES or NO question vs a specific category prediction)
* **Logistic/Linear Regression**, **Gradient Boosting** and **Deep Learning** (Embeddings and bidirectional LSTM network)
If you want to learn more about the process, [**here's an article**](https://towardsdatascience.com/artificial-intelligence-in-magic-the-gathering-4367e88aee11)
**How do you start testing the models?**
* **<<<** Look at the sidebar on the left. There's where you pick a card or create a card yourself!
* Select the data source
* FROM JSON: you provide the JSON file with your card (you can also download the template).
* USE FORM: you complete a form with your own card data, starting from a template.
* FROM DB: you load a REAL card from the app's database. You can also modify the card!
* **vvv** Look down below: Select the model you want to use
* Run the model with the **EXECUTE MODEL** button
""")
# Link to the YouTube channel
link = '[Watch all the videos in the YouTube channel](https://www.youtube.com/channel/UC3__atAqSUrIMNLg_-6aJBA)'
col5.markdown(link, unsafe_allow_html=True)
# Embed a video tutorial
st_player("https://youtu.be/30_tT_R7qtQ") # youtube video tutorial
# st.video("./static/Media1.mp4", format="video/mp4")
# Load the Card DB
card_db = | pd.read_csv("./datasets/WEBAPP_datasets_vow_20211220_FULL.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 19:00'),
Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 19:00'),
pd.NaT, | Timestamp('2011-01-03 00:00') | pandas.lib.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 22 14:26:49 2018
@author: <NAME>
"""
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import pca
import read_attributes_signatures
import scipy.stats as stats
import matplotlib.cm as cm
import matplotlib.colors as mcolors
def plot_pca(plotting_df: pd.DataFrame, describers: list):
"""
    Plots the PCA results as one scatter plot per describer, colored by that
    describer's values.
    :return: None; a figure is saved to disk for each describer.
"""
# Basic set up
alpha = 0.6
fig = plt.Figure()
# Plotting
for describer in describers:
sns.lmplot(x="PC 1",
y="PC 2",
data=plotting_df,
hue=describer,
fit_reg=False,
legend=False,
palette="inferno",
scatter_kws={"s": 10})
ax = plt.gca()
# Put the legend out of the figure
if plotting_df[describer].dtype == float:
normalize = mcolors.Normalize(vmin=plotting_df[describer].min(), vmax=plotting_df[describer].max())
scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cm.inferno)
scalarmappaple.set_array(plotting_df[describer])
plt.colorbar(scalarmappaple)
else:
legend = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
frameon=True, fancybox=True)
legend.get_frame().set_edgecolor("grey")
legend.get_frame().set_facecolor("white")
# Make plot nicer by removing the borders
ax.set_facecolor("white")
for spine in ax.spines.values():
spine.set_visible(False)
# Add correct descriptions
ax.set_title(describer, alpha=alpha)
ax.set_ylabel("PC 2", alpha=alpha)
ax.set_xlabel("PC 1", alpha=alpha)
ax.grid(color="grey", alpha=alpha)
plt.setp(ax.get_yticklabels(), alpha=alpha)
plt.setp(ax.get_xticklabels(), alpha=alpha)
ax.tick_params(axis=u'both', which=u'both',length=0)
# Save the plot
fig.tight_layout()
plt.savefig(describer.replace("\n","") + ".png", bbox_inches="tight")
plt.close()
if __name__ == "__main__":
variance = 0.8
pca_df = pca.pca_signatures(variance)
meta_df = read_attributes_signatures.read_meta()
att_df, sig_df = read_attributes_signatures.seperate_attributes_signatures(meta_df)
plotting_df = | pd.concat([pca_df, att_df], axis=1) | pandas.concat |
from Bio import SeqIO, Seq
from bisect import bisect_left
import re
import pyranges as pr
import Bio.Data.CodonTable
import pandas as pd
import logging
import gzip
from mimetypes import guess_type
from functools import partial
from pathlib import Path
from pandarallel import pandarallel
def get_codon_table():
codons_table = Bio.Data.CodonTable.standard_dna_table.forward_table
codons_stop = Bio.Data.CodonTable.standard_dna_table.stop_codons
codons_stop = {el: "*" for el in codons_stop}
codons_table = {**codons_table, **codons_stop}
return codons_table
def get_headers(records, pattern):
"""
Get information of the reads encoded in the headers
"""
for i, record in records:
m1 = re.match(pattern, record.id)
reads = {
"Chromosome": m1.group(2),
"Start": int(m1.group(6)),
"End": m1.group(7),
"Name": m1.group(0),
"Strand": m1.group(5),
"Start_read": int(m1.group(6)),
"End_read": int(m1.group(7)),
"Strand_read": m1.group(5),
"type": m1.group(4),
"read_length": m1.group(8),
"damage": m1.group(9),
}
yield reads
def get_prodigal_coords(x):
"""
Get prodigal coordinates from headers
"""
# get coords from prodigal fasta header
    s = re.split(r"\#|\s", x.replace(" ", ""))
coords = [int(i) for i in s[1:4]]
return | pd.Series(coords) | pandas.Series |
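# Illustrative example (not part of the original source), assuming a typical
# Prodigal-style FASTA header such as:
#   "contig_1_1 # 2 # 181 # 1 # ID=1_1;partial=00;start_type=ATG"
# After spaces are stripped, splitting on '#' yields
# ['contig_1_1', '2', '181', '1', 'ID=1_1;...'], so s[1:4] becomes the integer
# coordinates [2, 181, 1] (start, end, strand).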
import base64
import os
from datetime import datetime, timedelta
import jinja2
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedFormatter, PercentFormatter
from matplotlib.dates import TU, WeekdayLocator
import pandas as pd
from compute_uptake_for_paper import at_risk_cols, cols, demographic_cols, other_cols
from ethnicities import ethnicities, high_level_ethnicities
from groups import groups, at_risk_groups
# Ensure SVGs are created reproducibly
mpl.rcParams["svg.hashsalt"] = 42
wave_column_headings = {
"total": "All",
"all_priority": "Priority groups",
"1": "In care home",
"2": "80+",
"3": "75-79",
"4": "CEV or 70-74",
"5": "65-69",
"6": "At risk",
"7": "60-64",
"8": "55-59",
"9": "50-54",
"0": "Other",
}
# Add thousands separators to all integers in HTML output
class IntArrayFormatter(pd.io.formats.format.GenericArrayFormatter):
def _format_strings(self):
return [f" {v:,}" for v in self.values]
pd.io.formats.format.IntArrayFormatter = IntArrayFormatter
def run(base_path, earliest_date, latest_date):
backend = base_path.rstrip("/").split("/")[-1]
titles = get_titles()
label_maps = get_label_maps()
tables_path = f"{base_path}/tables"
charts_path = f"{base_path}/charts"
reports_path = f"{base_path}/reports"
os.makedirs(tables_path, exist_ok=True)
os.makedirs(charts_path, exist_ok=True)
os.makedirs(reports_path, exist_ok=True)
for key in ["dose_1", "dose_2", "pf", "az"]:
in_path = f"{base_path}/cumulative_coverage/all/{key}/all_{key}_by_group.csv"
generate_summary_table_for_all(
in_path, tables_path, key, earliest_date, latest_date
)
generate_charts_for_all(in_path, charts_path, key, earliest_date, latest_date)
generate_report_for_all(
backend,
tables_path,
charts_path,
reports_path,
key,
earliest_date,
latest_date,
)
for wave in range(1, 9 + 1):
in_path = f"{base_path}/cumulative_coverage/group_{wave}/{key}"
generate_summary_table_for_wave(
in_path,
tables_path,
wave,
key,
earliest_date,
latest_date,
titles,
label_maps,
)
generate_charts_for_wave(
in_path,
charts_path,
wave,
key,
earliest_date,
latest_date,
titles,
label_maps,
)
generate_report_for_wave(
backend,
tables_path,
charts_path,
reports_path,
wave,
key,
earliest_date,
latest_date,
)
def generate_summary_table_for_all(
in_path, tables_path, key, earliest_date, latest_date
):
uptake = load_uptake(in_path, earliest_date, latest_date)
last_week_date = uptake.index[-9]
summary = pd.DataFrame(
{
"latest": uptake.iloc[-2],
"last_week": uptake.iloc[-8],
"total": uptake.iloc[-1],
}
)
summary.loc["total"] = summary.sum()
summary["latest_pc"] = 100 * summary["latest"] / summary["total"]
summary["last_week_pc"] = 100 * summary["last_week"] / summary["total"]
summary["in_last_week_pc"] = summary["latest_pc"] - summary["last_week_pc"]
columns = {
"latest_pc": f"Vaccinated at {latest_date} (%)",
"latest": f"Vaccinated at {latest_date} (n)",
"total": "Population",
"last_week_pc": f"Vaccinated at {last_week_date} (%)",
"in_last_week_pc": "Vaccinated in past week (%)",
}
summary = summary[list(columns)]
summary.rename(columns=columns, inplace=True)
rows = {str(wave): f"Group {wave}" for wave in range(1, 9 + 1)}
rows["0"] = "Other"
rows["total"] = "Population"
summary = summary.loc[list(rows)]
summary.rename(index=rows, inplace=True)
summary.to_csv(f"{tables_path}/all_{key}.csv", float_format="%.1f%%")
def generate_charts_for_all(in_path, charts_path, key, earliest_date, latest_date):
uptake = load_uptake(in_path, earliest_date, latest_date)
uptake_total = uptake.iloc[:-1] / 1_000_000
uptake_total["total"] = uptake_total.loc[:, "0":"9"].sum(axis=1)
uptake_total["all_priority"] = uptake_total.loc[:, "1":"9"].sum(axis=1)
uptake_total = uptake_total.loc[:, ["total", "all_priority", "0"]]
uptake_total = uptake_total[
[col for col in wave_column_headings if col in uptake_total.columns]
]
uptake_total.rename(columns=wave_column_headings, inplace=True)
plot_chart(
uptake_total,
"Total number of patients vaccinated (million)",
f"{charts_path}/all_{key}_total.png",
is_percent=False,
)
uptake_pc = 100 * uptake / uptake.loc["total"]
uptake_pc.drop("total", inplace=True)
uptake_pc.fillna(0, inplace=True)
columns = {str(wave): f"Group {wave}" for wave in range(1, 9 + 1)}
columns["0"] = "Other"
uptake_pc = uptake_pc[
[col for col in wave_column_headings if col in uptake_pc.columns]
]
uptake_pc.rename(columns=wave_column_headings, inplace=True)
plot_chart(
uptake_pc,
"Proportion of patients vaccinated",
f"{charts_path}/all_{key}_percent.png",
)
def generate_report_for_all(
backend, tables_path, charts_path, out_path, key, earliest_date, latest_date
):
subtitle = {
"dose_1": "First dose",
"dose_2": "Second dose",
"pf": "Pfizer",
"az": "AstraZeneca",
}[key]
summary = pd.read_csv(f"{tables_path}/all_{key}.csv", index_col=0)
charts = []
with open(f"{charts_path}/all_{key}_total.png", "rb") as f:
charts.append(base64.b64encode(f.read()).decode("utf8"))
with open(f"{charts_path}/all_{key}_percent.png", "rb") as f:
charts.append(base64.b64encode(f.read()).decode("utf8"))
ctx = {
"subtitle": subtitle,
"backend": backend,
"latest_date": latest_date,
"charts": charts,
"table": summary.to_html(
classes=["table", "table-sm"], border="0", float_format="%.1f%%"
),
}
with open("templates/summary.html") as f:
template = jinja2.Template(f.read())
with open(f"{out_path}/all_{key}.html", "w") as f:
f.write(template.render(ctx))
def generate_summary_table_for_wave(
in_path, out_path, wave, key, earliest_date, latest_date, titles, label_maps
):
uptake = load_uptake(
f"{in_path}/group_{wave}_{key}_by_sex.csv", earliest_date, latest_date
)
if uptake is None:
return
last_week_date = uptake.index[-9]
overall_summary = compute_summary(uptake)
summaries = {"Overall": pd.DataFrame({"-": overall_summary.sum(axis=0)}).transpose()}
at_risk_summary = {}
other_summary = {}
for col in cols:
title = titles[col]
labels = label_maps[col]
uptake = load_uptake(
f"{in_path}/group_{wave}_{key}_by_{col}.csv", earliest_date, latest_date
)
if col in demographic_cols:
summaries[title] = compute_summary(uptake, labels)
elif col in at_risk_cols:
summary = compute_summary(uptake)
if "True" in summary.index:
at_risk_summary[titles[col]] = summary.loc["True"]
elif col in other_cols:
summary = compute_summary(uptake)
if "True" in summary.index:
other_summary[titles[col]] = summary.loc["True"]
else:
assert False, col
summaries["Clinical Risk Groups"] = pd.DataFrame.from_dict(
at_risk_summary, orient="index"
)
summaries["Other Groups"] = pd.DataFrame.from_dict(other_summary, orient="index")
summaries = {k: v for k, v in summaries.items() if not v.empty}
summary = pd.concat(summaries.values(), keys=summaries.keys())
summary["latest_pc"] = 100 * summary["latest"] / summary["total"]
summary["last_week_pc"] = 100 * summary["last_week"] / summary["total"]
summary["in_last_week_pc"] = summary["latest_pc"] - summary["last_week_pc"]
columns = {
"latest_pc": f"Vaccinated at {latest_date} (%)",
"latest": f"Vaccinated at {latest_date} (n)",
"total": "Population",
"last_week_pc": f"Vaccinated at {last_week_date} (%)",
"in_last_week_pc": "Vaccinated in past week (%)",
}
summary = summary[list(columns)]
summary.rename(columns=columns, inplace=True)
summary.to_csv(f"{out_path}/group_{wave}_{key}.csv", float_format="%.1f%%")
def compute_summary(uptake, labels=None):
latest_date = datetime.strptime(uptake.index[-2], "%Y-%m-%d").date()
last_week_date = datetime.strptime(uptake.index[-9], "%Y-%m-%d").date()
assert latest_date - last_week_date == timedelta(days=7)
summary = pd.DataFrame(
{
"latest": uptake.iloc[-2],
"last_week": uptake.iloc[-8],
"total": uptake.iloc[-1],
}
)
if labels is not None:
summary.rename(index=labels, inplace=True)
return summary
def generate_charts_for_wave(
in_path, out_path, wave, key, earliest_date, latest_date, titles, label_maps
):
for col in cols:
title = f"Vaccination coverage in Priority Group {wave}\nby {titles[col]}"
labels = label_maps[col]
uptake = load_uptake(
f"{in_path}/group_{wave}_{key}_by_{col}.csv", earliest_date, latest_date
)
if uptake is None:
return
cohort_average = 100 * uptake.sum(axis=1).iloc[-2] / uptake.sum(axis=1).iloc[-1]
uptake_pc = compute_uptake_percent(uptake, labels)
plot_chart(
uptake_pc, title, f"{out_path}/group_{wave}_{key}_{col}.png", cohort_average,
)
if col == "ethnicity":
plot_chart(
uptake_pc,
title,
f"{out_path}/group_{wave}_{key}_{col}_highlighting_bangladeshi_ethnicity.png",
cohort_average,
highlight_bangladeshi_ethnicity=True,
)
def compute_uptake_percent(uptake, labels):
uptake_pc = 100 * uptake / uptake.loc["total"]
uptake_pc.drop("total", inplace=True)
uptake_pc.fillna(0, inplace=True)
if set(uptake_pc.columns) == {"True", "False"}:
# This ensures that chart series are always same colour.
uptake_pc = uptake_pc[["True", "False"]]
else:
# Sort DataFrame columns so that legend is in the same order as chart series.
uptake_pc.sort_values(
uptake_pc.last_valid_index(), axis=1, ascending=False, inplace=True
)
uptake_pc.rename(columns=labels, inplace=True)
return uptake_pc
def generate_report_for_wave(
backend, tables_path, charts_path, out_path, wave, key, earliest_date, latest_date
):
subtitle = {
"dose_1": "First dose",
"dose_2": "Second dose",
"pf": "Pfizer",
"az": "AstraZeneca",
}[key]
subtitle = f"{subtitle} / Priority Group {wave}"
try:
summary = pd.read_csv(f"{tables_path}/group_{wave}_{key}.csv", index_col=[0, 1])
except FileNotFoundError:
return
charts = []
for col in cols:
with open(f"{charts_path}/group_{wave}_{key}_{col}.png", "rb") as f:
charts.append(base64.b64encode(f.read()).decode("utf8"))
ctx = {
"subtitle": subtitle,
"backend": backend,
"latest_date": latest_date,
"charts": charts,
"table": summary.to_html(
classes=["table", "table-sm"], border="0", float_format="%.1f%%"
),
}
with open("templates/summary.html") as f:
template = jinja2.Template(f.read())
with open(f"{out_path}/group_{wave}_{key}.html", "w") as f:
f.write(template.render(ctx))
def get_titles():
titles = {
"sex": "Sex",
"ethnicity": "Ethnicity",
"high_level_ethnicity": "Ethnicity (broad categories)",
"imd_band": "Index of Multiple Deprivation",
}
titles.update(groups)
titles.update(at_risk_groups)
return titles
def get_label_maps():
labels = {
"sex": {"F": "Female", "M": "Male"},
"ethnicity": {str(k): v for k, v in ethnicities.items()},
"high_level_ethnicity": {str(k): v for k, v in high_level_ethnicities.items()},
"imd_band": {
"0": "Unknown",
"1": "1 (most deprived)",
"2": "2",
"3": "3",
"4": "4",
"5": "5 (least deprived)",
},
}
for group in groups:
labels[group] = {"False": "no", "True": "yes"}
for group in at_risk_groups:
labels[group] = {"False": "no", "True": "yes"}
return labels
def plot_chart(
df,
title,
out_path,
cohort_average=None,
is_percent=True,
highlight_bangladeshi_ethnicity=False,
):
df.index = pd.to_datetime(df.index)
ax = plt.gca()
if highlight_bangladeshi_ethnicity:
for col in df.columns:
if "Bangladeshi" in col:
c, alpha, thickness = "r", 1, 3
label = "Asian or Asian British - Bangladeshi"
elif "Asian or Asian British" in col:
c, alpha, thickness = "r", 0.6, 1
label = "Asian or Asian British"
else:
c, alpha, thickness = "b", 0.3, 1
label = "Other ethnicities"
ax.plot(df[col], alpha=alpha, c=c, label=label, linewidth=thickness)
handles, labels = ax.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax.legend(
by_label.values(),
by_label.keys(),
bbox_to_anchor=(1.05, 1),
loc=2,
borderaxespad=0,
)
else:
df.plot(ax=ax)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)
ax.set_title(title)
# Add x-axis ticks for each Tuesday (the day that vaccines were first made
# available.)
week_days = df.loc[df.index.dayofweek == TU.weekday]
tick_labels = [
d.strftime("%d %b %Y")
if ix == 0 or d.month == 1 and d.day <= 7
else d.strftime("%d %b")
for ix, d in enumerate(week_days.index)
]
ax.xaxis.set_major_locator(WeekdayLocator(byweekday=TU, interval=1))
ax.xaxis.set_major_formatter(FixedFormatter(tick_labels))
ax.xaxis.set_tick_params(rotation=90)
ax.set_ylim(ymin=0)
if is_percent:
ax.yaxis.set_major_formatter(PercentFormatter())
ax.set_ylim(ymax=100)
if cohort_average is not None:
ax.axhline(cohort_average, color="k", linestyle="--", alpha=0.5)
ax.text(df.index[0], cohort_average * 1.02, "latest overall cohort rate")
plt.savefig(out_path, dpi=300, bbox_inches="tight")
plt.close()
def load_uptake(path, earliest_date, latest_date):
try:
uptake = | pd.read_csv(path, index_col=0) | pandas.read_csv |
import os
import fnmatch
from utils_thai import file2date
from bs4 import BeautifulSoup
from utils_scraping import parse_file, pptx2chartdata, sanitize_filename
from covid_data import briefing_case_types, briefing_deaths, briefing_deaths_provinces, briefing_deaths_summary, briefing_documents, get_tests_by_area_chart_pptx, test_dav_files, vac_manuf_given, vac_slides_files, vaccination_daily, vaccination_reports_files2, vaccination_tables, get_tests_by_area_pdf
import pandas as pd
import pytest
from utils_pandas import export, import_csv
import dateutil
# def write_csv(df, input, parser_name):
# export(df, f"{input.rsplit('.', 1)[0]}.{parser_name}", csv_only=True)
# def find_files(dir, pat):
# dir_path = os.path.dirname(os.path.realpath(__file__))
# dir_path = os.path.join(dir_path, dir)
# for root, dir, files in os.walk(dir_path):
# for file in fnmatch.filter(files, pat):
# base, ext = file.rsplit(".", 1)
# testdf = None
# csvs = fnmatch.filter(files, f"{base}*.csv")
# if not csvs:
# yield os.path.join(root, file), testdf, None
# continue
# for check in csvs:
# _, func, ext = check.rsplit(".", 2)
# try:
# testdf = import_csv(check.rsplit(".", 1)[0], dir=root, index=["Date"])
# except pd.errors.EmptyDataError:
# pass
# yield os.path.join(root, file), testdf, func
def dl_files(target_dir, dl_gen, check=False):
"find csv files and match them to dl files, either by filename or date"
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, target_dir)
downloads = {}
for url, date, get_file in dl_gen(check):
fname = sanitize_filename(url.rsplit("/", 1)[-1])
fname, _ = fname.rsplit(".", 1) # remove ext
if date is not None:
sdate = str(date.date())
downloads[sdate] = (sdate, get_file)
# put in file so test is identified if no date
downloads[fname] = (str(date.date()) if date is not None else fname, get_file)
tests = []
missing = False
for root, dir, files in os.walk(dir_path):
for test in fnmatch.filter(files, "*.json"):
base, ext = test.rsplit(".", 1)
# special format of name with .2021-08-01 to help make finding test files easier
if "." in base:
rest, dateish = base.rsplit(".", 1)
if file2date(dateish):
base = rest
# throw away date since rest is file to check against
try:
testdf = pd.read_json(os.path.join(root, test), orient="table")
except ValueError:
testdf = None
# try:
# testdf = import_csv(check.rsplit(".", 1)[0], dir=root, index=["Date"])
# except pd.errors.EmptyDataError:
# testdf = None
date, get_file = downloads.get(base, (None, None))
if get_file is None:
missing = True
tests.append((date, testdf, get_file))
if missing and not check:
# files not cached yet so try again
return dl_files(target_dir, dl_gen, check=True)
else:
return tests
# @pytest.mark.parametrize("input, testdf, parser", find_files("testing_moph", "*.pptx"))
# def test_pptx(input, testdf, parser):
# data = pd.DataFrame()
# raw = pd.DataFrame()
# for chart, title, series, pagenum in pptx2chartdata(input):
# data, raw = get_tests_by_area_chart_pptx(input, title, series, data, raw)
# pd.testing.assert_frame_equal(testdf, data)
# 2021-07-05 0.0
# 2021-07-06 0.0
# 2021-07-07 0.0
# 2021-07-08 0.0
# 2021-07-09 0.0
# 2021-07-10 0.0
# 2021-07-11 0.0
@pytest.mark.parametrize("fname, testdf, get_file", dl_files("vaccination_daily", vaccination_reports_files2))
def test_vac_reports(fname, testdf, get_file):
assert get_file is not None
file = get_file() # Actually download
assert file is not None
df = pd.DataFrame(columns=["Date"]).set_index(["Date"])
for page in parse_file(file):
df = vaccination_daily(df, None, file, page)
# df.to_json(f"tests/vaccination_daily/{fname}.{str(df.index.max().date())}.json", orient='table', indent=2)
pd.testing.assert_frame_equal(testdf, df, check_dtype=False)
@pytest.mark.parametrize("fname, testdf, get_file", dl_files("vaccination_tables", vaccination_reports_files2))
def test_vac_tables(fname, testdf, get_file):
assert get_file is not None
file = get_file() # Actually download
assert file is not None
df = pd.DataFrame(columns=["Date"]).set_index(["Date"])
for page in parse_file(file):
df = vaccination_tables(df, None, page, file)
# df.to_json(f"tests/vaccination_tables/{fname}.{str(df.index.max()[0].date())}.json", orient='table', indent=2)
pd.testing.assert_frame_equal(testdf, df, check_dtype=False)
@pytest.mark.parametrize("fname, testdf, get_file", dl_files("vac_manuf_given", vac_slides_files))
def test_vac_manuf_given(fname, testdf, get_file):
assert get_file is not None
file = get_file() # Actually download
assert file is not None
df = | pd.DataFrame(columns=["Date"]) | pandas.DataFrame |
"""This code is related to chapter 2 of the book."""
# %%
# Importing Libraries
from scipy.stats import expon, loguniform
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVR
from sklearn.tree import plot_tree
import matplotlib.image as mpimg
from scipy import stats
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.impute import SimpleImputer
from pandas.plotting import scatter_matrix
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
from zlib import crc32
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import tarfile
import urllib.request
import joblib
# %% [markdown]
# # Get the Data
# ## Download the Data
# %%
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
"""
    Download the housing archive and extract it into housing_path.
    housing_url -- URL of the housing data archive
    housing_path -- local directory where the data is saved
"""
os.makedirs(housing_path, exist_ok=True)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
# Calling the function fetch_housing_data
fetch_housing_data()
# %%
# loading the data
def load_housing_data(housing_path=HOUSING_PATH):
"""
Load the dataset using pandas library.
Args:
housing_path ([string]): Defaults to HOUSING_PATH.
Returns:
<class 'function'>:
This function returns a pandas DataFrame object
containing all the data.
"""
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# %%
housing = load_housing_data()
housing.head()
# %% [markdown]
# ## Take a Quick Look at the Data Structure
# %%
housing.info()
# %%
housing["ocean_proximity"].value_counts()
# %%
housing.describe()
# %%
housing.hist(bins=50, figsize=(20, 15))
plt.show()
# %% [markdown]
# ## Create a Test Set
# %%
def split_train_test(data, test_ratio):
"""
Split test data from the dataset.
Args:
data([DataFrame]): The main dataset
containing train data and test data
test_ratio([float]): the ratio of the test data
Returns:
train set([DataFrame])
test set([DataFrame])
    This function shuffles an array of row indices with NumPy's random
    permutation (the array has the same length as the data), computes the
    test-set size from test_ratio, splits the shuffled indices into test
    and train indices, and returns the corresponding train and test sets.
"""
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
# calling the split_train_test function
train_set, test_set = split_train_test(housing, 0.2)
# %%
len(train_set)
# %%
len(test_set)
# %%
def test_set_check(identifier, test_ratio):
    """Return True if the identifier's 32-bit hash falls into the test-set fraction (test_ratio) of the hash space."""
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
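# Illustrative note (not part of the original notebook): with test_ratio=0.2 an
# identifier goes to the test set whenever crc32(id) & 0xffffffff is below
# 0.2 * 2**32, i.e. roughly the lowest 20% of the 32-bit hash range. Because the
# decision depends only on the id's hash, existing rows keep their train/test
# assignment even when the dataset is refreshed or extended.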
# %%
def split_train_test_by_id(data, test_ratio, id_column):
    """Split the data into train and test sets using a stable hash of id_column."""
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))
return data.loc[~in_test_set], data.loc[in_test_set]
# %%
housing_with_id = housing.reset_index() # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# %%
housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
# %%
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# %%
housing["income_cat"] = pd.cut(housing["median_income"],
bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
labels=[1, 2, 3, 4, 5])
housing["income_cat"].hist()
# %%
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
# using "income_cat" attribute to split the train and test set
for train_index, test_index in split.split(housing, housing["income_cat"]):
"""
        We pass the stratification variable ("income_cat") as the second
        argument of split() so that the train/test split preserves its
        distribution.
"""
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_test_set["income_cat"].value_counts() / len(strat_test_set)
# %%
# removing the redundant attribute
for set_ in (strat_train_set, strat_test_set):
set_.drop("income_cat", axis=1, inplace=True)
# %% [markdown]
# # Discover and Visualize the Data to Gain Insights
# ## Visualizing Geographical Data
# %%
# replacing the housing variable with stratified train set
housing = strat_train_set.copy()
# %%
housing.plot(kind="scatter", x="longitude", y="latitude")
# %%
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
# %%
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
s=housing["population"]/100, label="population", figsize=(10, 7),
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
)
plt.legend()
# %% [markdown]
# ## Looking for Correlations
# %%
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# %%
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
| scatter_matrix(housing[attributes], figsize=(12, 8)) | pandas.plotting.scatter_matrix |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
QUANTAXIS indicator calculation
7 categories, 76 indicators in total
- trend indicators
- oscillator indicators
- volume indicators
- moving-average indicators
- momentum indicators
- channel indicators
- broad-market indicators
"""
"""Trend indicators"""
import numpy as np
import pandas as pd
from .formula import SMA
# TODO
# indicators based on stateless pandas data structures
def QA_indicator_dma(data, f=10, s=50, N=10):
"""
    DMA (Different of Moving Average) indicator.
    Short/medium-term trend indicator.
    Uses the difference between two moving averages with different base periods
    to gauge current buying/selling strength and the likely direction of future prices.
"""
_dma = pd.Series(data).rolling(f).mean() - \
pd.Series(data).rolling(s).mean()
_ama = pd.Series(_dma).rolling(N).mean()
return _dma, _ama
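# Illustrative usage (not part of the original source); `close` stands for a
# hypothetical sequence/Series of closing prices:
# dma, ama = QA_indicator_dma(close, f=10, s=50, N=10)
# A widening positive gap between dma and ama is commonly read as strengthening
# upward momentum; a widening negative gap as the opposite.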
def QA_indicator_dmi(data):
"""
    DMI (Directional Movement Index).
    Medium/long-term indicator.
    Used to judge how the balance of power between bulls and bears shifts from
    equilibrium to imbalance as prices fluctuate.
"""
pass
def QA_indicator_dpo(data, N=20, M=6):
"""
    DPO (Detrended Price Oscillator) - range oscillation.
    Several short cycles combine into one long cycle; observing how the short
    cycles move helps estimate when the peaks and troughs of the long cycle
    will appear. For example, four short-term cycle bottoms make up one
    long-term cycle bottom.
    DPO therefore deliberately ignores longer-period fluctuations, which both
    reduces the confusion caused by cycle interference and highlights the
    movement of the individual cycles.
    On a moving average of a given period, the half-period point is where the
    price's centre of gravity gathers; with a 20-day period, day 10 is the
    balance point of the whole period. The moving average looks like a wavy,
    twisted rope with the price shuttling up and down around it. Removing the
    twist and pulling the rope flat, the balance point becomes the 0 axis of
    the chart, and the gap between the day's price and the balance point is
    plotted above or below that axis, which makes the highs and lows of the
    short cycle much clearer. (See the illustrative sketch after this function.)
"""
_dpo = pd.Series(data) - | pd.Series(data) | pandas.Series |
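# Illustrative sketch only (not part of the original source): one common way to
# complete the DPO/MADPO calculation, assuming `data` is a pandas Series (or
# sequence) of closing prices and N, M have the same meaning as in
# QA_indicator_dpo above.
def _dpo_sketch(data, N=20, M=6):
    # detrend against the N-period moving average taken N/2 + 1 bars earlier
    ma_n = pd.Series(data).rolling(N).mean()
    dpo = pd.Series(data) - ma_n.shift(N // 2 + 1)
    # MADPO is a simple M-period moving average of the DPO
    madpo = dpo.rolling(M).mean()
    return dpo, madpo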
import pandas as pd
import numpy as np
class IdleSleepModeConverter:
def __init__(self, init_fill=None, init_is_ism=None):
if init_fill is not None:
self._current_fill = init_fill
else:
self._current_fill = pd.DataFrame()
if init_is_ism is not None:
self._before_is_ism = init_is_ism
else:
self._before_is_ism = pd.DataFrame()
self._before_10s = pd.DataFrame()
self._converted = pd.DataFrame()
def get_ism(self):
return self._ism_df
def get_closest_fill(self, st):
df = self._df.set_index(self._df.columns[0])
fills = self._is_fill[self._is_fill.index <= st]
if not fills.empty:
fill_et = fills.index[-1] + pd.Timedelta(1, unit='s')
fill_st = fill_et - pd.Timedelta(10, unit='s')
current_fill = df[(df.index >= fill_st)
& (df.index < fill_et)]
if current_fill.index[-1] - current_fill.index[0] > pd.Timedelta(9, unit='s'):
self._current_fill = current_fill
return self._current_fill
def get_last_10s(self):
df = self._df.set_index('HEADER_TIME_STAMP')
et = df.index[-1] + pd.Timedelta(1, unit='s')
st = et - pd.Timedelta(10, unit='s')
return df[(df.index >= st) & (df.index < et)].reset_index(drop=False)
def detect_ism(self):
self._current_is_ism = self._df.groupby(pd.TimeGrouper(
freq='1s', key=self._df.columns[0])).apply(self._detect_ism)
def _detect_ism(self, df):
df = df.set_index(df.columns[0])
unique_counts = df.apply(lambda col: len(col.unique()), axis=0)
return np.all(unique_counts == 1)
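    # Illustrative note (not part of the original source): a one-second group in
    # which every column holds a single repeated value (e.g. a flat-lined
    # accelerometer reading on all axes) is flagged as idle sleep mode; any
    # variation in any column makes _detect_ism return False.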
def detect_fill(self):
if self._before_is_ism is not None:
is_ism = pd.concat(
[self._before_is_ism.iloc[-9:, ], self._current_is_ism], axis=0)
else:
is_ism = self._current_is_ism
self._is_fill = is_ism.rolling(10).apply(
lambda df: np.all(df == False)).dropna()
self._is_fill = self._is_fill[self._is_fill.values == 1]
def reverse_ism(self):
current_is_ism = self._current_is_ism.reset_index(drop=False)
self._filled_ism_counts = current_is_ism.groupby(
| pd.TimeGrouper(freq='10s', key=current_is_ism.columns[0]) | pandas.TimeGrouper |
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel())
]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
for p in [ | tm.makePanel4D() | pandas.util.testing.makePanel4D |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Objective: get_history should fetch all the data at once then save it to separate files.
import logging
logger = logging.getLogger(__name__)
fhandler = logging.FileHandler(filename='audiolizer.log', mode='a')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
logger.setLevel(logging.DEBUG)
# + active="ipynb"
# import logging
# logger = logging.getLogger()
# logger.setLevel(logging.DEBUG)
# logging.debug("test")
# +
import pytz
from Historic_Crypto import HistoricalData
import pandas as pd
import os
from datetime import datetime
def get_timezones(url):
return [dict(label=v, value=v) for v in pytz.all_timezones]
valid_granularities = [60, 300, 900, 3600, 21600, 86400]
granularity = int(os.environ.get('AUDIOLIZER_GRANULARITY', 300)) # seconds
# see api for valid intervals https://docs.pro.coinbase.com/#get-historic-rates
assert granularity in valid_granularities
audiolizer_temp_dir = os.environ.get('AUDIOLIZER_TEMP', './history')
logger.info('audiolizer temp data: {}'.format(audiolizer_temp_dir))
max_age = pd.Timedelta(os.environ.get('AUDIOLIZER_MAX_AGE', '5m'))
logger.info('audiolizer max daily age {}'.format(max_age))
def get_granularity(cadence):
"""Get query granularity for the current cadence
We need the query granularity to be within the valid granularities
The cadence needs to be a multiple of the granularity
"""
dt = pd.Timedelta(cadence).total_seconds()
for _ in valid_granularities[::-1]:
if (_ <= dt) & (dt%_ == 0):
return _
raise NotImplementedError('cannot find granularity for cadence {}'.format(cadence))
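# Illustrative usage (not part of the original source); these cadence strings
# are examples only:
# get_granularity('15min') -> 900
# get_granularity('1H')    -> 3600
# get_granularity('1D')    -> 86400
# A cadence that is not a whole multiple of any valid granularity raises
# NotImplementedError.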
def file_cadence(granularity):
"""Set the resolution of the file"""
if granularity < 60*60*24:
return '1D'
else:
return '1M'
def refactor(df, frequency='1W'):
"""Refactor/rebin the data to a lower cadence
The data is regrouped using pd.Grouper
"""
low = df.low.groupby(pd.Grouper(freq=frequency)).min()
high = df.high.groupby(pd.Grouper(freq=frequency)).max()
close = df.close.groupby(pd.Grouper(freq=frequency)).last()
open_ = df.open.groupby(pd.Grouper(freq=frequency)).first()
volume = df.volume.groupby(pd.Grouper(freq=frequency)).sum()
return pd.DataFrame(dict(low=low, high=high, open=open_, close=close, volume=volume))
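# Illustrative usage (not part of the original source): `five_min_df` is a
# hypothetical OHLCV DataFrame indexed by timestamp at 5-minute cadence.
# daily = refactor(five_min_df, frequency='1D')   # daily candles
# weekly = refactor(five_min_df, frequency='1W')  # weekly candles (the default)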
def load_date(ticker, granularity, int_):
logger.info('loading single date {}'.format(int_))
start_ = int_.left.strftime('%Y-%m-%d-%H-%M')
end_ = int_.right.strftime('%Y-%m-%d-%H-%M')
try:
return HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
except:
logger.warning('could not load using {} {}'.format(start_, end_))
raise
def get_gaps(df, granularity):
new_ = refactor(df, '{}s'.format(granularity))
return new_[new_.close.isna()]
def fetch_data(ticker, granularity, start_, end_):
"""Need dates in this format %Y-%m-%d-%H-%M"""
try:
return HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
except:
logger.warning('could not load using {} {}'.format(start_, end_))
raise
def write_data(df, ticker, granularity):
"""write data grouped by date
Note: data gaps will result if a partial day provided.
Make sure data is complete before passing to this function
"""
for t, group in df.groupby(pd.Grouper(freq='1D')):
tstr = t.strftime('%Y-%m-%d-%H-%M')
fname = audiolizer_temp_dir + '/{}-{}-{}.csv.gz'.format(
ticker, granularity, t.strftime('%Y-%m-%d'))
group.to_csv(fname, compression='gzip')
logger.info('wrote {}'.format(fname))
def fetch_missing(files_status, ticker, granularity):
"""Iterate over batches of missing dates"""
for batch, g in files_status[files_status.found==0].groupby('batch', sort=False):
t1, t2 = g.iloc[[0, -1]].index
# extend by 1 day whether or not t1 == t2
t2 += pd.Timedelta('1D')
endpoints = [t.strftime('%Y-%m-%d-%H-%M') for t in [t1, t2]]
logger.info('fetching {}, {}'.format(len(g), endpoints))
df = fetch_data(ticker, granularity, *endpoints).loc[t1:t2] # only grab data between endpoints
write_data(df[df.index < t2], ticker, granularity)
def get_files_status(ticker, granularity, start_date, end_date):
start_date = pd.to_datetime(start_date.date())
end_date = pd.to_datetime(end_date.date())
fnames = []
foundlings = []
dates = []
batch = []
batch_number = 0
last_found = -1
for int_ in pd.interval_range(start_date, end_date):
dates.append(int_.left)
fname = audiolizer_temp_dir + '/{}-{}-{}.csv.gz'.format(
ticker, granularity, int_.left.strftime('%Y-%m-%d'))
found = int(os.path.exists(fname))
foundlings.append(found)
if found != last_found:
batch_number += 1
last_found = found
batch.append(batch_number)
fnames.append(fname)
files_status = pd.DataFrame(dict(files=fnames, found=foundlings, batch=batch), index=dates)
return files_status
# + active="ipynb"
# files_status = get_files_status('BTC-USD',
# 300, # 2021-07-27 00:00:00 -> 2021-07-29 00:00:00
# pd.to_datetime('2021-07-27 00:00:00'),
# pd.to_datetime('2021-07-29 00:00:00'))
#
# files_status
# -
def get_today_GMT():
# convert from system time to GMT
system_time = pd.Timestamp(datetime.now().astimezone())
today = system_time.tz_convert('GMT').tz_localize(None)
return today
# * getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 03:50:49.619707
# * INFO:history:getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 04:07:48.872110
# * 2021-07-14 00:00:00 -> 2021-07-21 04:07:22.738431
# +
def get_today(ticker, granularity):
today = get_today_GMT()
tomorrow = today + pd.Timedelta('1D')
start_ = '{}-00-00'.format(today.strftime('%Y-%m-%d'))
end_ = today.strftime('%Y-%m-%d-%H-%M')
try:
df = HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
return df
except:
logger.warning('could not load using {} {}'.format(start_, end_))
raise
def get_age(fname):
    """Get the age of a given file (time since its last modification)."""
st=os.stat(fname)
mtime=st.st_mtime
return pd.Timestamp.now() - datetime.fromtimestamp(mtime)
def get_history(ticker, granularity, start_date, end_date = None):
"""Fetch/load historical data from Coinbase API at specified granularity
Data loaded from start_date through end of end_date
params:
start_date: (str) (see pandas.to_datetime for acceptable formats)
end_date: (str)
granularity: (int) seconds (default: 300)
price data is saved by ticker and date and stored in audiolizer_temp_dir
There are two timezones to keep track of. Assume input in GMT
system timezone: the timezone of the machine the audiolizer is run from
GMT: the timezone that price history is fetched/stored in
"""
start_date = pd.to_datetime(start_date)
today = get_today_GMT() #tz-naive but value matches GMT
if end_date is None:
# don't include today
end_date = today + pd.Timedelta('1d')
logger.info('no end_date provided, using {}'.format(end_date))
else:
# convert the user-specified date and timezone to GMT
end_date = pd.to_datetime(end_date)
# prevent queries from the future
end_date = min(today, end_date) + pd.Timedelta('1d')
logger.info('using end_date {}'.format(end_date))
assert start_date <= end_date
logger.info('getting {} {}s files status: {} -> {}'.format(ticker, granularity, start_date, end_date))
files_status = get_files_status(ticker, granularity, start_date, end_date)
files_status = files_status[files_status.index < pd.to_datetime(today.date())]
fetch_missing(files_status, ticker, granularity)
if len(files_status) == 0:
raise IOError('Could not get file status for {}'.format(ticker, start_date, end_date))
df = pd.concat(map(lambda file: pd.read_csv(file, index_col='time', parse_dates=True, compression='gzip'),
files_status.files)).drop_duplicates()
if end_date >= today:
logger.info('end date is today!')
# check age of today's data. If it's old, fetch the new one
today_fname = audiolizer_temp_dir + '/{}-{}-today.csv.gz'.format(ticker, granularity)
if os.path.exists(today_fname):
if get_age(today_fname) > max_age:
logger.info('{} is too old, fetching new data'.format(today_fname))
today_data = get_today(ticker, granularity)
today_data.to_csv(today_fname, compression='gzip')
else:
logger.info('{} is not that old, loading from disk'.format(today_fname))
today_data = pd.read_csv(today_fname, index_col='time', parse_dates=True, compression='gzip')
else:
logger.info('{} not present. loading'.format(today_fname))
today_data = get_today(ticker, granularity)
today_data.to_csv(today_fname, compression='gzip')
df = | pd.concat([df, today_data]) | pandas.concat |
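# Illustrative usage (not part of the original source); the ticker and dates
# are examples only, and granularity is in seconds (one of the valid Coinbase
# granularities):
# btc_5min = get_history('BTC-USD', 300, '2021-07-01', end_date='2021-07-10')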
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pendulum
import arrow
import pandas as pd
from sanic import Blueprint
from sanic import response
from sanic_openapi import doc
from sanic_cors import cross_origin
import pysnooper
from ..models import APIVALIDATION
# from ..eniacutil import ApiValidation
from ..es_operate import es_sql_df, conduct_dataframe, deal_q_time, conduct_mul_dataframe, es_query_df
from ..mongo_operate import read_mongo, read_mongo_aggregate
from ..eniacutil import query_es
import toolkit
if toolkit.__version__ == "1.7.20":
from toolkit.managers import Timer
else:
from toolkit.tools.managers import Timer
import numpy as np
import urllib.parse
financial = Blueprint('financial', url_prefix='/financial', strict_slashes=True)
@financial.route('/class', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('股票板块类型')
@doc.description('股票板块类型包括 INDUSTRY 行业板块 REGION 地域板块 CONCEPT 概念板块 other 其它板块')
async def get_big_class(request):
with Timer() as timer:
with Timer() as timer_es:
try:
data = es_sql_df(sql=f"SELECT plate_type FROM stock_plate group by plate_type")
except Exception as e:
data = {"state": -1, "message": str(e)}
data = list(data['plate_type'])
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/shareholder/statistics/<market>/<subtype>', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('板块股东统计')
@doc.description('此板块下股东数据全返回')
async def get_shareholder_statistics(request, market, subtype):
with Timer() as timer:
with Timer() as timer_es:
try:
usesubtype = "'" + subtype + "'"
usemarket = "'" + market + "'"
data = es_sql_df(
sql=f"SELECT code,code_name FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
codes = list(data['code'])
# print(codes)
sdf = read_mongo(db="stock_data", collection="gdrs", select={'_id': 0})
df = sdf.loc[sdf['code'].isin(codes),]
data = df.replace("--", 0).to_dict('recodes')
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/shareholder/detail', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('股东数据')
@doc.description('股东数据')
async def get_shareholder_detail(request):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(request.args)
# print(request.body)
sdf = read_mongo(db="stock_data",
collection="institution_cq",
query={'date': request.args['date'][0],
request.args['field'][0]: {'$ne': None}},
                                  # TODO: filtering out None/null/"NaN" here does not work yet, so fetch twice as much data and filter afterwards
select={'_id': 0},
sortby=request.args['field'][0],
upAndDown=int(request.args['sort'][0]),
limitNum=int(request.args['limit'][0]) * 2)
# sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
sdf = sdf.fillna(value=0)
# print(len(sdf))
start = (int(request.args['page'][0]) - 1) * int(request.args['size'][0])
end = start + int(request.args['size'][0])
sdf = sdf.loc[sdf[request.args['field'][0]] != 0,]
# if int(request.args['sort'][0]) == 1:
# sdf = sdf.sort_values(by=request.args['field'][0], ascending=1)
# else:
# sdf = sdf.sort_values(by=request.args['field'][0], ascending=0)
num = len(sdf)
# print(len(sdf))
sdf = sdf[start:end]
# print(sdf)
# sdf = sdf.sort_values(by=request.args['field'][0], ascending=int(request.args['sort'][0]))
# print(sdf[request.args['field'][0]])
data = sdf.to_dict('recodes')
# print(data[0])
# print(len(data))
# data=data.fillna(value=0)
except Exception as e:
num = 0
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"all_num": num,
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/shareholder/onedetail', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('单只股票股东持股数据')
@doc.description('单只股票股东持股数据')
async def get_shareholder_onedetail(request):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(request.args)
# print(request.body)
sdf = read_mongo(db="stock_data",
collection="institution_cq",
query={'code': request.args['code'][0]},
                                  # TODO: filtering out None/null/"NaN" here does not work yet, so fetch twice as much data and filter afterwards
select={'_id': 0},
)
# sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# print(sdf)
# print(sdf)
sdf = sdf.replace([np.inf, -np.inf], np.nan)
# print(sdf)
sdf = sdf.fillna(value=0)
print(sdf)
data = sdf.to_dict('recodes')
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/class/<market>/<ctype>', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('股票子板块查询')
@doc.description('股票子板块 market 可选 SZ SH HK US 0 ctype 可选 INDUSTRY 行业板块 REGION 地域板块 CONCEPT 概念板块 other 其它板块')
async def get_subtype_name(request, market, ctype):
with Timer() as timer:
with Timer() as timer_es:
try:
usectype = "'" + ctype + "'"
usemarket = "'" + market + "'"
data = es_sql_df(
sql=f"SELECT plate_name,plate_code FROM stock_plate where plate_type = {usectype} and category = {usemarket} ")
data = data.drop_duplicates().to_dict('recodes')
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/allclass', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('股票全板块')
@doc.description('股票子板块 market 可选 SZ SH HK US 0 ctype 可选 INDUSTRY 行业板块 REGION 地域板块 CONCEPT 概念板块 other 其它板块')
async def get_all_class(request):
with Timer() as timer:
with Timer() as timer_es:
try:
data = []
for mkt in ['SH', 'SZ', 'US', 'HK']:
for ctp in ['INDUSTRY', 'REGION', 'CONCEPT', 'other']:
usectype = "'" + ctp + "'"
usemarket = "'" + mkt + "'"
try:
dataone = es_sql_df(
sql=f"SELECT plate_name,plate_code FROM stock_plate where plate_type = {usectype} and category = {usemarket} ")
# print(dataone)
dataone = dataone.drop_duplicates().to_dict('recodes')
# dataone = dataone.to_dict('recodes')
# print(dataone)
except:
dataone = []
data.append({"market": mkt, "plate_type": ctp, "plate": dataone})
# print(data)
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/allcode', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('股票全名称')
@doc.description('拉取全股票板块归属')
async def get_all_class(request):
with Timer() as timer:
with Timer() as timer_es:
try:
data = es_sql_df(sql=f"SELECT * FROM stock_code ")
# print(dataone)
data = data.to_dict('recodes')
# dataone = dataone.to_dict('recodes')
# print(dataone)
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/code/<market>/<subtype>', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('股票子板块中的股票')
@doc.description('股票子板块股票 market 可选 SZ SH HK US ')
async def get_code_from_subtype(request, market, subtype):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(subtype)
usesubtype = "'" + subtype + "'"
usemarket = "'" + market + "'"
data = es_sql_df(
sql=f"SELECT code,code_name FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
data = data.to_dict('recodes')
except Exception as e:
data = {"state": -1, "message": str(e)}
# data = list(data['code'])
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/detail/<market>/<subtype>/<dim>', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('子板块所有股票财报详情')
@doc.description('子板块所有股票 market 可选 SZ SH HK US ')
async def get_subtype_detail(request, market, subtype, dim):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(subtype)
usesubtype = "'" + subtype + "'"
usemarket = "'" + market + "'"
usedim = "'" + dim + "'"
data = es_sql_df(
sql=f"SELECT code,code_name FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
codes = list(data['code'])
# print(codes)
# print(codes)
# usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[
# :-3] + " ) "
# print(usecodes)
if dim == "Year":
fdata = read_mongo(db="stock_data",
collection="finance",
query={'InfoSource': 'FY',
'$or': [{'code': c} for c in codes]
}
,
select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1
},
)
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource = 'FY' ")
else:
fdata = read_mongo(db="stock_data",
collection="finance",
query={
'$or': [{'code': c} for c in codes],
'InfoSource': {
'$ne': "FY"
}
}
,
select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1
},
)
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource != 'FY' ")
# print(fdata)
fdata = fdata.fillna(value=0)
fdata = fdata.loc[fdata['PB'] != 0,]
fdata['code_name'] = conduct_dataframe(fdata, 'code',
lambda code: list(data.loc[data['code'] == code, 'code_name'])[
0])
# print(fdata)
# print(data)
# print(fdata)
# print(fdata['code'])
# fdata = pd.merge(data,fdata,how = 'left', on='code')
if dim == "Quarter":
data = []
fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY-MM"))
for t in list(fdata['time'].drop_duplicates()):
data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
elif dim == "Year":
data = []
fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY"))
for t in list(fdata['time'].drop_duplicates()):
data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
else:
pass
# data = data.to_dict('recodes')
except Exception as e:
data = {"state": -1, "message": str(e)}
# data = list(data['code'])
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/onedetail/<code>/<dim>', methods=['GET', 'OPTIONS'])
@doc.summary('单只股票财报详情')
@doc.description('单只股票财报详情 可选 Quarter Years')
async def get_one_code_detail(request, code, dim):
with Timer() as timer:
with Timer() as timer_es:
try:
usecode = "'" + code + "'"
usedim = "'" + dim + "'"
# usesubtype = "'" + subtype + "'"
# data = es_sql_df(sql=f"SELECT code FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
# codes = list(data['code'])
# # print(codes)
# usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[:-3] + " ) "
if dim == "Year":
fdata = read_mongo(db="stock_data",
collection="finance",
query={'InfoSource': 'FY',
'code': code
}
,
sortby='EndDate',
select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1
},
)
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where code = {usecode} and InfoSource = 'FY' ")
else:
fdata = read_mongo(db="stock_data",
collection="finance",
query={
'code': code,
'InfoSource': {
'$ne': "FY"
}
}
,
sortby='EndDate',
select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1
},
)
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where code = {usecode} and InfoSource != 'FY' ")
# print(fdata)
fdata = fdata.fillna(value=0)
fdata = fdata.loc[fdata['PB'] != 0,]
# print(fdata)
if dim == "Quarter":
data = []
fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY-MM"))
for t in list(fdata['time'].drop_duplicates()):
data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
elif dim == "Year":
data = []
fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY"))
for t in list(fdata['time'].drop_duplicates()):
data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
else:
pass
# data = data.to_dict('recodes')
except Exception as e:
data = {"state": -1, "message": str(e)}
# data = list(data['code'])
# print(data)
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/calculate/<code>/<kline_index>/<ranger>/<dim>/<autotype>', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('k线季度年度均值计算')
@doc.description('k线季度年度均值计算 可选 Quarter Years')
async def kline_calculate(request, code, kline_index, ranger, dim, autotype):
with Timer() as timer:
with Timer() as timer_es:
try:
data = query_es(kline_index, code, ranger, autotype)
if dim == "Quarter":
data['time'] = conduct_dataframe(data, "create_time", deal_q_time)
data = data[['time', 'close']].groupby('time').mean()
# print(data)
data['time'] = data.index
                    data = data.to_dict('records')
else:
data['time'] = conduct_dataframe(data, "create_time", lambda s: arrow.get(s).format("YYYY"))
data = data[['time', 'close']].groupby('time').mean()
# print(data)
data['time'] = data.index
                    data = data.to_dict('records')
except Exception as e:
data = {"state": -1, "message": str(e)}
# data = list(data['code'])
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/statistics/<market>/<subtype>/<dim>', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('子板块所有股票财报统计')
@doc.description('子板块所有股票 market 可选 SZ HK US ')
async def get_subtype_statistics(request, market, subtype, dim):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(subtype)
usesubtype = "'" + subtype + "'"
usemarket = "'" + market + "'"
usedim = "'" + dim + "'"
data = es_sql_df(
sql=f"SELECT code FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
codes = list(data['code'])
# print(codes)
usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[
:-3] + " ) "
# print(usecodes)
if dim == "Year":
fdata = read_mongo(db="stock_data",
collection="finance",
query={'InfoSource': 'FY',
'$or': [{'code': c} for c in codes]
}
,
select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1
},
)
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource = 'FY' ")
else:
fdata = read_mongo(db="stock_data",
collection="finance",
query={
'$or': [{'code': c} for c in codes],
'InfoSource': {
'$ne': "FY"
}
}
,
select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1
},
)
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource != 'FY' ")
# print(fdata)
# fdata = fdata.fillna(value=0)
if dim == "Quarter":
# data = []/financial/statistics
fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY-MM"))
# for t in list(fdata['time'].drop_duplicates()):
# data.append({"dim":t,"result":fdata.loc[(fdata['time'] == t),].to_dict("records")})
elif dim == "Year":
# data = []
fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY"))
# for t in list(fdata['time'].drop_duplicates()):
# data.append({"dim":t,"result":fdata.loc[(fdata['time'] == t),].to_dict("records")})
else:
pass
del fdata['EndDate']
del fdata['code']
# del fdata['spider_time']
del fdata['time_key']
# print("fdata")
# print(fdata)
mindata = fdata.groupby("time").min()
mindata['code'] = 'min'
mindata['time'] = mindata.index
maxdata = fdata.groupby("time").max() # axis=0,skipna=True)
maxdata['code'] = 'max'
maxdata['time'] = maxdata.index
mediandata = fdata.groupby("time").median() # axis=0,skipna=True)
                mediandata['code'] = 'median'
mediandata['time'] = mediandata.index
                adata = pd.concat([mindata, maxdata, mediandata],
                                  ignore_index=False, sort=False)  # keep defaults; the index may contain duplicates
adata = adata.fillna(value=0)
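                # Note: the three separate groupby passes above could also be written
                # as a single aggregation (sketch only, not what this handler uses):
                #   stats = fdata.groupby("time").agg(['min', 'max', 'median'])
                # Keeping them separate makes it easy to tag each statistic in the
                # 'code' column before concatenating the frames.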
# print("adata")
# print(adata)
data = []
# print(list(adata['time'].drop_duplicates()))
for t in list(adata['time'].drop_duplicates()):
data.append({"dim": t, "result": adata.loc[(adata['time'] == t),].to_dict("records")})
# data = data.to_dict('recodes')
# print("data")
# print(data)
except Exception as e:
data = {"state": -1, "message": str(e)}
# data = list(data['code'])
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
#
# @financial.route('/statistics/Q/<market>/<subtype>/<dim>', methods=['GET', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('子板块所有股票财报统计')
# @doc.description('子板块所有股票 market 可选 SH SZ HK US ')
# async def get_subtype_statistics(request, market, subtype, dim):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(subtype)
# usesubtype = "'" + subtype + "'"
# usemarket = "'" + market + "'"
# usedim = "'" + dim + "'"
# data = es_sql_df(
# sql=f"SELECT code,code_name FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
# codes = list(data['code'])
#
# # print(codes)
# usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[
# :-3] + " ) "
# # print(usecodes)
# if dim == "Year":
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource = 'FY' ")
# elif dim == "Quarter" and market == "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where {usecodes} and ( InfoSource = 'Q1' or InfoSource = 'Q2' or InfoSource = 'Q3' or InfoSource = 'Q4' ) ")
# elif dim == "QuarterTotal" and market == "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where {usecodes} and ( InfoSource = 'Q6' or InfoSource = 'Q9' ) ")
# elif dim == "QuarterTotal" and market != "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where {usecodes} and ( InfoSource = 'Q3' or InfoSource = 'Q6' or InfoSource = 'Q9' ) ")
#
# else:
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource = 'Q100' ")
#
# # print(fdata)
# fdata = fdata.fillna(value=0)
# fdata = fdata.loc[fdata['PB'] != 0,]
# fdata['code_name'] = conduct_dataframe(fdata, 'code',
# lambda code: list(data.loc[data['code'] == code, 'code_name'])[
# 0])
# # print(fdata)
# # print(data)
# # print(fdata)
# # print(fdata['code'])
# # fdata = pd.merge(data,fdata,how = 'left', on='code')
#
# if dim != "Year":
# fdata['time'] = conduct_mul_dataframe(fdata, lambda q, s: arrow.get(s).format("YYYY") + q,
# fdata['InfoSource'], fdata['EndDate'])
#
#
# elif dim == "Year":
# fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY"))
# else:
# pass
#
# del fdata['EndDate']
# del fdata['code']
# del fdata['spider_time']
# del fdata['time_key']
# # print("fdata")
# # print(fdata)
#
# mindata = fdata.groupby("time").min()
# mindata['code'] = 'min'
# mindata['time'] = mindata.index
#
# maxdata = fdata.groupby("time").max() # axis=0,skipna=True)
# maxdata['code'] = 'max'
# maxdata['time'] = maxdata.index
#
# mediandata = fdata.groupby("time").median() # axis=0,skipna=True)
# mediandata['code'] = 'medain'
# mediandata['time'] = mediandata.index
#
# adata = pd.concat([mindata, maxdata, mediandata],
# ignore_index=0, sort=False) # 默认设置,索引可重复
#
# adata = adata.fillna(value=0)
# # print("adata")
# # print(adata)
# data = []
# # print(list(adata['time'].drop_duplicates()))
# for t in list(adata['time'].drop_duplicates()):
# data.append({"dim": t, "result": adata.loc[(adata['time'] == t),].to_dict("records")})
#
# # data = data.to_dict('recodes')
# # print("data")
# # print(data)
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
# # data = list(data['code'])
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "data": data
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
# @financial.route('/detail/Q/<market>/<subtype>/<dim>', methods=['GET', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('子板块所有股票财报详情')
# @doc.description('子板块所有股票 \n market 可选 SZ SH HK US \n dim : Quarter QuarterTotal Year')
# async def get_subtype_detail(request, market, subtype, dim):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(subtype)
# usesubtype = "'" + subtype + "'"
# usemarket = "'" + market + "'"
# usedim = "'" + dim + "'"
# data = es_sql_df(
# sql=f"SELECT code,code_name FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
# codes = list(data['code'])
# # print(codes)
#
# # print(codes)
# usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[
# :-3] + " ) "
# # print(usecodes)
# if dim == "Year":
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource = 'FY' ")
# elif dim == "Quarter" and market == "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where {usecodes} and ( InfoSource = 'Q1' or InfoSource = 'Q2' or InfoSource = 'Q3' or InfoSource = 'Q4' ) ")
# elif dim == "QuarterTotal" and market == "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where {usecodes} and ( InfoSource = 'Q6' or InfoSource = 'Q9' ) ")
# elif dim == "QuarterTotal" and market != "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where {usecodes} and ( InfoSource = 'Q3' or InfoSource = 'Q6' or InfoSource = 'Q9' ) ")
#
# else:
# fdata = es_sql_df(sql=f"SELECT * FROM finance_calculate where {usecodes} and InfoSource = 'Q100' ")
#
# # print(fdata)
# fdata = fdata.fillna(value=0)
# fdata = fdata.loc[fdata['PB'] != 0,]
# fdata['code_name'] = conduct_dataframe(fdata, 'code',
# lambda code: list(data.loc[data['code'] == code, 'code_name'])[
# 0])
# # print(fdata)
# # print(data)
# # print(fdata)
# # print(fdata['code'])
# # fdata = pd.merge(data,fdata,how = 'left', on='code')
#
# if dim != "Year":
# data = []
# fdata['time'] = conduct_mul_dataframe(fdata, lambda q, s: arrow.get(s).format("YYYY") + q,
# fdata['InfoSource'], fdata['EndDate'])
# for t in list(fdata['time'].drop_duplicates()):
# data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
#
# elif dim == "Year":
# data = []
# fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY"))
# for t in list(fdata['time'].drop_duplicates()):
# data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
# else:
# pass
#
# # data = data.to_dict('recodes')
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
# # data = list(data['code'])
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "all_num": len(fdata),
# "data": data
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
#
# # @financial.route('/onedetail/<subtype>/<code>/<dim>')#, version='v1', name='Dto')
# @financial.route('/onedetail/Q/<code>/<dim>', methods=['GET', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('单只股票财报详情')
# @doc.description('单只股票财报详情 可选 Quarter QuarterTotal Years')
# async def get_one_code_detail(request, code, dim):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# usecode = "'" + code + "'"
# usedim = "'" + dim + "'"
# market = code[:2]
# # usesubtype = "'" + subtype + "'"
# # data = es_sql_df(sql=f"SELECT code FROM stock_plate where plate_code = {usesubtype} and category = {usemarket} ")
# # codes = list(data['code'])
# # # print(codes)
# # usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[:-3] + " ) "
# if dim == "Year":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where code = {usecode} and InfoSource = 'FY' ")
#
# elif dim == "Quarter" and market == "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where code = {usecode} and ( InfoSource = 'Q1' or InfoSource = 'Q2' or InfoSource = 'Q3' or InfoSource = 'Q4' ) ")
# elif dim == "QuarterTotal" and market == "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where code = {usecode} and ( InfoSource = 'Q6' or InfoSource = 'Q9' ) ")
# elif dim == "QuarterTotal" and market != "US":
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where code = {usecode} and ( InfoSource = 'Q3' or InfoSource = 'Q6' or InfoSource = 'Q9' ) ")
#
# else:
# fdata = es_sql_df(
# sql=f"SELECT * FROM finance_calculate where code = {usecode} and InfoSource = 'Q100' ")
#
# # print(fdata)
# fdata = fdata.fillna(value=0)
# fdata = fdata.loc[fdata['PB'] != 0,]
# # print(fdata)
#
# if dim != "Year":
# data = []
# fdata['time'] = conduct_mul_dataframe(fdata, lambda q, s: arrow.get(s).format("YYYY") + q,
# fdata['InfoSource'], fdata['EndDate'])
# for t in list(fdata['time'].drop_duplicates()):
# data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
#
# elif dim == "Year":
# data = []
# fdata['time'] = conduct_dataframe(fdata, "EndDate", lambda s: arrow.get(s).format("YYYY"))
# for t in list(fdata['time'].drop_duplicates()):
# data.append({"dim": t, "result": fdata.loc[(fdata['time'] == t),].to_dict("records")})
# else:
# pass
#
# # data = data.to_dict('recodes')
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
# # data = list(data['code'])
# # print(data)
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "data": data
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
# @financial.route('/fund/statistics', methods=['GET', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('机构数据统计')
# @doc.description('汇总机构总市值')
# async def get_fund_statistics(request):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(request.args)
# # print(request.body)
# sdf = read_mongo(db="stock_data",
# collection="ths_position_data",
# # collection="dfcf_jjcc_data",
# query={'date': request.args['date'][0]},
# select={'_id': 0,
# 'mc': 1,
# 'jglx': 1,
#
# # 'date':1,
# 'cgsz': 1},
# )
# # sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# # print(sdf)
#
# sdf['type'] = conduct_dataframe(sdf, "cgsz", lambda _s: isinstance(_s, float))
# sdf = sdf.loc[sdf['type'] == 1,]
# # print(sdf['type'])
# del sdf['type']
# # sdf = sdf.loc[sdf['cgsz'] != "-",]
# # print(sdf.loc[sdf['cgsz'] == '不足0.01%'])
# sdf['cgsz'] = conduct_dataframe(sdf, "cgsz", float)
# sdf = sdf.fillna(value=0)
# df1 = sdf[['mc', 'cgsz', 'jglx']].groupby(['mc', 'jglx']).sum().reset_index()
# df2 = sdf[['mc', 'cgsz', 'jglx']].groupby(['mc', 'jglx']).count().reset_index()
# df1['num'] = df2['cgsz']
# sdf = df1
# # print(sdf)
# if int(request.args['sort'][0]) == 1:
# sdf = sdf.sort_values(by=request.args['field'][0], ascending=1)
# else:
# sdf = sdf.sort_values(by=request.args['field'][0], ascending=0)
#
# # print(sdf)
#
# start = (int(request.args['page'][0]) - 1) * int(request.args['size'][0])
# end = start + int(request.args['size'][0])
# count_num = len(sdf)
# sdf = sdf[start:end]
# data = sdf.to_dict('recodes')
#
#
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "count_num": count_num,
# "data": data
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
#
# @financial.route('/fund/detail', methods=['POST', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('机构数据详情')
# @doc.description('汇总机构数据详情')
# async def get_fund_detail(request):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(request.args)
# import json
# _json = json.loads(str(request.body, "utf-8"))
# # print(json.loads(str(request.body,"utf-8"))['mc'])
# sdf = read_mongo(db="stock_data",
# collection="ths_position_data",
# # collection="dfcf_jjcc_data",
# query={'mc': _json['mc']},
# select={'_id': 0},
# )
# # sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# # print(sdf)
# # sdf = sdf.fillna(value=0)
# # sdf = sdf[['jglx','mc','cysl']].groupby(['mc','jglx']).sum().reset_index()
# # print(sdf)
# # if int(request.args['sort'][0]) == 1:
# # sdf = sdf.sort_values(by='cysl', ascending=1)
# # else:
# # sdf = sdf.sort_values(by='cysl', ascending=0)
# #
# # start = (int(request.args['page'][0])-1)*int(request.args['size'][0])
# # end = start + int(request.args['size'][0])
# # count_num=len(sdf)
# # sdf = sdf[start:end]
# # print(sdf)
# sdf = sdf.fillna(value=0)
# data = sdf.to_dict('recodes')
#
#
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "data": data
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
#
# @financial.route('/fund/bk', methods=['POST', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('机构数据板块详情')
# @doc.description('汇总机构板块详情')
# async def get_funds_bk(request):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(request.args)
# import json
# _json = json.loads(str(request.body, "utf-8"))
# # print(json.loads(str(request.body,"utf-8"))['mc'])
# sdf = read_mongo(db="stock_data",
# collection="ths_position_data",
# # collection="dfcf_jjcc_data",
# query={'mc': _json['mc']},
# select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1},
# )
# # sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# # print(sdf)
# # sdf = sdf.fillna(value=0)
# # sdf = sdf[['jglx','mc','cysl']].groupby(['mc','jglx']).sum().reset_index()
# # print(sdf)
# # if int(request.args['sort'][0]) == 1:
# # sdf = sdf.sort_values(by='cysl', ascending=1)
# # else:
# # sdf = sdf.sort_values(by='cysl', ascending=0)
# #
# # start = (int(request.args['page'][0])-1)*int(request.args['size'][0])
# # end = start + int(request.args['size'][0])
# # count_num=len(sdf)
# # sdf = sdf[start:end]
# # print(sdf)
# # print(sdf.loc[sdf['date']=="2018-09-30",])
# # sdf = sdf.loc[sdf['date']=="2018-09-30",]
# codes = list(sdf['code'].drop_duplicates())
# dates = list(sdf['date'].drop_duplicates())
# # if len(codes) <= 1:
# # usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[:-3] + " ) "
# # fdata = es_sql_df(sql=f"SELECT code,plate_name FROM stock_plate where code = codes[0] and plate_type = 'INDUSTRY' ")
# #
# #
# # else:
# usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[
# :-3] + " ) "
# # print(usecodes)
# # print(usecodes)
# fdata = es_sql_df(
# sql=f"SELECT code,plate_name FROM stock_plate where {usecodes} and plate_type = 'INDUSTRY' ")
# # fdata = es_sql_df(sql=f"SELECT code,plate_name FROM stock_plate where {usecodes} and plate_type = 'other' ")
# # print(fdata)
# # print(fdata)
# data = []
# # print(fdata)
#
# for d in dates:
# try:
# ndf = pd.merge(sdf.loc[sdf['date'] == d,], fdata, on='code', how='right')
# ndf = ndf.dropna(how='any')
# _sum = sum(ndf['cgsz'])
#
# # 计算百分比
# # ndf['cgsz'] = conduct_dataframe(ndf,"cgsz",lambda _s : _s/_sum)
# # print(ndf)
#
# rdf = pd.pivot_table(ndf,
# index=['date'],
# columns=['plate_name'],
# values='cgsz',
# aggfunc=sum).reset_index()
# # print(rdf)
# # print(d)
# result = rdf.to_dict('recodes')
# # print(result)
# onedata = {"date": result[0]['date']}
# onedata['bk'] = list(result[0].keys())[1:]
# onedata['value'] = list(result[0].values())[1:]
#
# data.append(onedata)
# except:
# pass
#
# # data.append(rdf.to_dict('recodes'))
# fdata = es_sql_df(
# sql=f"SELECT code,plate_name FROM stock_plate where {usecodes} and plate_type = 'CONCEPT' ")
# data1 = []
# for d in dates:
# try:
# ndf = pd.merge(sdf.loc[sdf['date'] == d,], fdata, on='code', how='right')
# ndf = ndf.dropna(how='any')
# _sum = sum(ndf['cgsz'])
# # ndf['cgsz'] = conduct_dataframe(ndf,"cgsz",lambda _s : _s/_sum)
# # print(ndf)
# rdf = pd.pivot_table(ndf,
# index=['date'],
# columns=['plate_name'],
# values='cgsz',
# aggfunc=sum).reset_index()
#
# result = rdf.to_dict('recodes')
# onedata = {"date": result[0]['date']}
# onedata['bk'] = list(result[0].keys())[1:]
# onedata['value'] = list(result[0].values())[1:]
#
# data1.append(onedata)
# except:
# pass
# # print(data)
#
# # print(len(sdf))
# # print(ndf)
# # print(len(ndf))
#
# # sdf = sdf.fillna(value=0)
# # rdf = pd.pivot_table(ndf,index=['date'],
# # columns=['plate_name'],
# # values='cgsz',
# # aggfunc=lambda x : x/sum(x))
# # print(rdf)
# # data = sdf.to_dict('recodes')
#
#
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "data": data,
# "data_CONCEPT": data1
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
#
# @financial.route('/funds/statisticsion', methods=['GET', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('基金数据统计')
# @doc.description('汇总基金总市值')
# async def get_fund2_statistics(request):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(request.args)
# # print(request.body)
# sdf = read_mongo(db="stock_data",
# collection="dfcf_jjcc_data",
# # collection="dfcf_jjcc_data",
# query={'date': request.args['date'][0]},
# select={'_id': 0,
# 'mc': 1,
# 'cgsz': 1},
# )
# # sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# # print(sdf)
# # sdf = sdf.fillna(value=0)
# # print(sdf)
# # sdf = sdf.loc[sdf['cgsz'] != "---",]
# # sdf['cgsz'] = conduct_dataframe(sdf,"cgsz",float)
# sdf['type'] = conduct_dataframe(sdf, "cgsz", lambda _s: isinstance(_s, float))
# sdf = sdf.loc[sdf['type'] == 1,]
# # print(sdf['type'])
# del sdf['type']
# # print(len(sdf))
# # print(sdf)
# # sdf['']
#
# # sdf = sdf[['mc','cgsz']].groupby(['mc']).sum().reset_index()
# df1 = sdf[['mc', 'cgsz']].groupby(['mc']).sum().reset_index()
# df2 = sdf[['mc', 'cgsz']].groupby(['mc']).count().reset_index()
# df1['num'] = df2['cgsz']
# sdf = df1
# # print(sdf)
#
# if int(request.args['sort'][0]) == 1:
# sdf = sdf.sort_values(by=request.args['field'][0], ascending=1)
# else:
# sdf = sdf.sort_values(by=request.args['field'][0], ascending=0)
#
# start = (int(request.args['page'][0]) - 1) * int(request.args['size'][0])
# end = start + int(request.args['size'][0])
# count_num = len(sdf)
# sdf = sdf[start:end]
# data = sdf.to_dict('recodes')
#
#
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "count_num": count_num,
# "data": data
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
#
# @financial.route('/funds/detail', methods=['POST', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('基金数据详情')
# @doc.description('汇总基金详情')
# async def get_funds_detail(request):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(request.args)
# import json
# _json = json.loads(str(request.body, "utf-8"))
# # print(json.loads(str(request.body,"utf-8"))['mc'])
# sdf = read_mongo(db="stock_data",
# collection="dfcf_jjcc_data",
# # collection="dfcf_jjcc_data",
# query={'mc': _json['mc']},
# select={'_id': 0},
# )
# # sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# # print(sdf)
# # sdf = sdf.fillna(value=0)
# # sdf = sdf[['jglx','mc','cysl']].groupby(['mc','jglx']).sum().reset_index()
# # print(sdf)
# # if int(request.args['sort'][0]) == 1:
# # sdf = sdf.sort_values(by='cysl', ascending=1)
# # else:
# # sdf = sdf.sort_values(by='cysl', ascending=0)
# #
# # start = (int(request.args['page'][0])-1)*int(request.args['size'][0])
# # end = start + int(request.args['size'][0])
# # count_num=len(sdf)
# # sdf = sdf[start:end]
# # print(sdf)
# codes = list(sdf['code'].drop_duplicates())
#
# sdf = sdf.fillna(value=0)
# data = sdf.to_dict('recodes')
#
#
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
#
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "data": data
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
#
#
# @financial.route('/funds/bk', methods=['POST', 'OPTIONS'])
# # @cross_origin(financial)
# @doc.summary('基金数据板块详情')
# @doc.description('汇总基金板块详情')
# async def get_funds_bk(request):
# with Timer() as timer:
# with Timer() as timer_es:
# try:
# # print(request.args)
# import json
# _json = json.loads(str(request.body, "utf-8"))
# # print(json.loads(str(request.body,"utf-8"))['mc'])
# sdf = read_mongo(db="stock_data",
# collection="dfcf_jjcc_data",
# # collection="dfcf_jjcc_data",
# query={'mc': _json['mc']},
# select={'_id': 0,
# 'code': 1,
# 'cgsz': 1,
# 'date': 1},
# )
# # sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# # print(sdf)
# # sdf = sdf.fillna(value=0)
# # sdf = sdf[['jglx','mc','cysl']].groupby(['mc','jglx']).sum().reset_index()
# # print(sdf)
# # if int(request.args['sort'][0]) == 1:
# # sdf = sdf.sort_values(by='cysl', ascending=1)
# # else:
# # sdf = sdf.sort_values(by='cysl', ascending=0)
# #
# # start = (int(request.args['page'][0])-1)*int(request.args['size'][0])
# # end = start + int(request.args['size'][0])
# # count_num=len(sdf)
# # sdf = sdf[start:end]
# # print(sdf)
# codes = list(sdf['code'].drop_duplicates())
# dates = list(sdf['date'].drop_duplicates())
#
# usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[
# :-3] + " ) "
# # print(usecodes)
# fdata = es_sql_df(
# sql=f"SELECT code,plate_name FROM stock_plate where {usecodes} and plate_type = 'INDUSTRY' ")
# data = []
# for d in dates:
# try:
# ndf = pd.merge(sdf.loc[sdf['date'] == d,], fdata, on='code', how='right')
# ndf = ndf.dropna(how='any')
# _sum = sum(ndf['cgsz'])
# # ndf['cgsz'] = conduct_dataframe(ndf,"cgsz",lambda _s : _s/_sum)
# # print(ndf)
# rdf = pd.pivot_table(ndf,
# index=['date'],
# columns=['plate_name'],
# values='cgsz',
# aggfunc=sum).reset_index()
#
# result = rdf.to_dict('recodes')
# onedata = {"date": result[0]['date']}
# onedata['bk'] = list(result[0].keys())[1:]
# onedata['value'] = list(result[0].values())[1:]
#
# data.append(onedata)
# # print(data)
# except:
# pass
#
# fdata = es_sql_df(
# sql=f"SELECT code,plate_name FROM stock_plate where {usecodes} and plate_type = 'CONCEPT' ")
# data1 = []
# for d in dates:
# try:
# ndf = pd.merge(sdf.loc[sdf['date'] == d,], fdata, on='code', how='right')
# ndf = ndf.dropna(how='any')
# _sum = sum(ndf['cgsz'])
# # ndf['cgsz'] = conduct_dataframe(ndf,"cgsz",lambda _s : _s/_sum)
# # print(ndf)
# rdf = pd.pivot_table(ndf,
# index=['date'],
# columns=['plate_name'],
# values='cgsz',
# aggfunc=sum).reset_index()
#
# result = rdf.to_dict('recodes')
# onedata = {"date": result[0]['date']}
# onedata['bk'] = list(result[0].keys())[1:]
# onedata['value'] = list(result[0].values())[1:]
#
# data1.append(onedata)
# except:
# pass
#
# # data.append(rdf.to_dict('recodes'))
#
# # print(len(sdf))
# # print(ndf)
# # print(len(ndf))
#
# # sdf = sdf.fillna(value=0)
# # rdf = pd.pivot_table(ndf,index=['date'],
# # columns=['plate_name'],
# # values='cgsz',
# # aggfunc=lambda x : x/sum(x))
# # print(rdf)
# # data = sdf.to_dict('recodes')
#
#
#
# except Exception as e:
# data = {"state": -1, "message": str(e)}
# # print(data1)
# return response.json(
# {"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
# "use_time": f"{timer.cost:.2f}",
# "use_time_es": f"{timer_es.cost:.2f}",
# "count": len(data),
# "data": data,
# "data_CONCEPT": data1
# },
# headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
# status=200
# )
@financial.route('/allfunds/statisticsion', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('全机构数据统计')
@doc.description('汇总全机构总市值')
async def get_allfund_statistics(request):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(request.args)
# print(request.body)
# count_num = 0
sdf = read_mongo(db="stock_data",
collection="dfcf_ajax_data_top",
# collection="dfcf_jjcc_data",
query={'date': request.args['date'][0],
'market': request.args['market'][0],
},
select={'_id': 0,
'mc': 1,
'Type': 1,
'num': 1,
'cgsz': 1,
'ratio': 1},
                                 sortby=request.args['field'][0],  # sort field
                                 upAndDown=int(request.args['sort'][0]),  # ascending
                                 limitNum=99999,  # limit count
)
print(len(sdf))
print(arrow.now())
start = (int(request.args['page'][0]) - 1) * int(request.args['size'][0])
end = start + int(request.args['size'][0])
try:
count_num = len(sdf)
sdf = sdf[start:end]
sdf = sdf.fillna(value='---')
                        data = sdf.to_dict('records')
except:
count_num = 0
data = []
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"count_num": count_num,
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
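# Illustrative query for the endpoint above (all parameter values are made up,
# and the /financial prefix assumes the blueprint's default mount point):
#
#   GET /financial/allfunds/statisticsion?date=2020-03-31&market=SH&field=cgsz&sort=0&page=1&size=20
#
# `field` and `sort` drive the MongoDB sort, `page`/`size` slice the sorted result,
# and `count_num` in the response is the total row count before paging.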
@financial.route('/allfunds/range', methods=['GET', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('全机构季度区间')
@doc.description('全机构季度区间')
async def get_allfund_range(request):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(request.args)
# print(request.body)
# count_num = 0
sdf = read_mongo_aggregate(db="stock_data",
collection="dfcf_ajax_data_top",
pipline=[
{"$match": {'market': request.args['market'][0]}},
{"$group": {"_id": {"date": "$date"}}},
{"$project": {"date": "$_id.date"}},
{'$sort': {"date": 1}},
# {'$limit': 99999999}
],
)
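                    # The aggregation pipeline above lists the distinct report dates for
                    # the requested market: $match filters by market, $group on
                    # {"date": "$date"} de-duplicates, $project flattens _id.date back
                    # into a plain `date` field, and $sort returns the dates ascending.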
data = list(sdf['date'])#.to_dict('recodes')
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/allfunds/detail', methods=['POST', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('全机构数据详情')
@doc.description('汇总全机构详情')
async def get_allfund_detail(request):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(request.args)
import json
_json = json.loads(str(request.body, "utf-8"))
# print(json.loads(str(request.body,"utf-8"))['mc'])
sdf = read_mongo(db="stock_data",
collection="dfcf_ajax_data",
# collection="dfcf_jjcc_data",
query={'mc': _json['mc'],
# 'time_key':{"$gt":1520041600}
},
select={'_id': 0,
'SCode': 0,
'SName': 0,
'RDate': 0,
'SHCode': 0,
'SHName': 0,
'InstSName': 0,
'InstCode': 0,
# 'date':1,
# 'code':1,
# 'cysl':1,
# 'ShareHDNum_ratio':1,
# 'cgsz':1,
# 'Vposition_ratio':1,
# 'TabRate':1,
# 'TabRate_ratio':1,
# 'TabProRate':1,
# 'TabProRate_ratio':1
},
)
# sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# print(sdf)
# sdf = sdf.fillna(value=0)
# sdf = sdf[['jglx','mc','cysl']].groupby(['mc','jglx']).sum().reset_index()
# print(sdf)
# if int(request.args['sort'][0]) == 1:
# sdf = sdf.sort_values(by='cysl', ascending=1)
# else:
# sdf = sdf.sort_values(by='cysl', ascending=0)
#
# start = (int(request.args['page'][0])-1)*int(request.args['size'][0])
# end = start + int(request.args['size'][0])
# count_num=len(sdf)
# sdf = sdf[start:end]
# print(sdf)
# codes = list(sdf['code'].drop_duplicates())
sdf = sdf.fillna(value=0)
                data = sdf.to_dict('records')
except Exception as e:
data = {"state": -1, "message": str(e)}
return response.json(
{"create_time": pendulum.now().format('YYYY-MM-DD HH:mm:ss'),
"use_time": f"{timer.cost:.2f}",
"use_time_es": f"{timer_es.cost:.2f}",
"count": len(data),
"data": data
},
headers={'X-Served-By': 'sanic', "Access-Control-Allow-Origin": "*"},
status=200
)
@financial.route('/allfunds/bk', methods=['POST', 'OPTIONS'])
# @cross_origin(financial)
@doc.summary('全机构数据板块详情')
@doc.description('汇总全机构板块详情')
async def get_allfund_bk(request):
with Timer() as timer:
with Timer() as timer_es:
try:
# print(request.args)
import json
_json = json.loads(str(request.body, "utf-8"))
# print(json.loads(str(request.body,"utf-8"))['mc'])
sdf = read_mongo(db="stock_data",
collection="dfcf_ajax_data",
# collection="dfcf_jjcc_data",
query={'mc': _json['mc']},
select={'_id': 0,
'code': 1,
'cgsz': 1,
'date': 1},
)
# sdf['jdgjdiffrate'] = conduct_mul_dataframe(sdf,lambda _u,_d:_u/_d,sdf['jdgjdiff'],sdf['sjgj'])
# print(sdf)
# sdf = sdf.fillna(value=0)
# sdf = sdf[['jglx','mc','cysl']].groupby(['mc','jglx']).sum().reset_index()
# print(sdf)
# if int(request.args['sort'][0]) == 1:
# sdf = sdf.sort_values(by='cysl', ascending=1)
# else:
# sdf = sdf.sort_values(by='cysl', ascending=0)
#
# start = (int(request.args['page'][0])-1)*int(request.args['size'][0])
# end = start + int(request.args['size'][0])
# count_num=len(sdf)
# sdf = sdf[start:end]
# print(sdf)
codes = list(sdf['code'].drop_duplicates())
dates = list(sdf['date'].drop_duplicates())
usecodes = " ( " + "".join([" code = " + c + " or " for c in ["'" + i + "'" for i in codes]])[
:-3] + " ) "
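                # The line above builds an ES-SQL OR filter by string concatenation;
                # for hypothetical codes ['600000', '000001'] it yields roughly
                #   " ( code = '600000' or code = '000001' ) "
                # (the [:-3] slice trims the trailing "or "). Codes are interpolated
                # directly, so they are assumed to come from trusted data.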
# print(usecodes)
fdata = es_sql_df(
sql=f"SELECT code,plate_name FROM stock_plate where {usecodes} and plate_type = 'INDUSTRY' ")
data = []
for d in dates:
try:
                        ndf = pd.merge(sdf.loc[sdf['date'] == d,], fdata, on='code', how='right')
# -*- coding: utf-8 -*-
"""Console script for pyvirchow."""
import os
import six
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
from pyvirchow.io.operations import get_annotation_bounding_boxes
from pyvirchow.io.operations import get_annotation_polygons
from pyvirchow.io.operations import path_leaf
from pyvirchow.io.operations import read_as_rgb
from pyvirchow.io.operations import WSIReader
from pyvirchow.io.tiling import get_all_patches_from_slide
from pyvirchow.io.tiling import save_images_and_mask, generate_tiles, generate_tiles_fast
from pyvirchow.normalization import VahadaneNormalization
from pyvirchow.morphology.patch_extractor import TissuePatch
from pyvirchow.morphology.mask import get_common_interior_polygons
from tqdm import tqdm
import warnings
from multiprocessing import Pool
from pyvirchow.segmentation import label_nuclei, summarize_region_properties
from pyvirchow.misc.parallel import ParallelExecutor
# from pyvirchow.deep_model.model import slide_level_map
# from pyvirchow.deep_model.random_forest import random_forest
from pyvirchow.misc import xmltojson
from scipy.misc import imsave
from skimage.color import rgb2hsv
from collections import defaultdict
import joblib
from joblib import delayed
from joblib import parallel_backend
import numpy as np
from six import iteritems
import click
from shapely.geometry import Polygon as shapelyPolygon
from click_help_colors import HelpColorsGroup
import glob
from PIL import Image
click.disable_unicode_literals_warning = True
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
import pandas as pd
warnings.filterwarnings("ignore", category=RuntimeWarning)
np.warnings.filterwarnings("ignore")
COLUMNS = [
"area",
"bbox_area",
"compactness",
"convex_area",
"eccentricity",
"equivalent_diameter",
"extent",
"fractal_dimension",
"inertia_tensor_eigvals_1",
"inertia_tensor_eigvals_2",
"major_axis_length",
"max_intensity",
"mean_intensity",
"mean_intensity_entire_image",
"minor_axis_length",
"moments_central_1",
"moments_central_10",
"moments_central_11",
"moments_central_12",
"moments_central_13",
"moments_central_14",
"moments_central_15",
"moments_central_16",
"moments_central_2",
"moments_central_3",
"moments_central_4",
"moments_central_5",
"moments_central_6",
"moments_central_7",
"moments_central_8",
"moments_central_9",
"moments_hu_1",
"moments_hu_2",
"moments_hu_3",
"moments_hu_4",
"moments_hu_5",
"moments_hu_6",
"moments_hu_7",
"nuclei",
"nuclei_intensity_over_entire_image",
"orientation",
"perimeter",
"solidity",
"texture",
"total_nuclei_area",
"total_nuclei_area_ratio",
]
@click.group(
cls=HelpColorsGroup, help_headers_color="yellow", help_options_color="green"
)
def cli():
"""pyvirchow: tool for processing WSIs"""
pass
@cli.command(
"create-tissue-masks",
context_settings=CONTEXT_SETTINGS,
help="Extract tissue masks",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_tissue_masks_cmd(indir, level, savedir):
"""Extract tissue only patches from tumor WSIs.
"""
tumor_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
for tumor_wsi in tqdm(tumor_wsis):
wsi = WSIReader(tumor_wsi, 40)
tissue_patch = TissuePatch(wsi, level=level)
uid = wsi.uid.replace(".tif", "")
out_file = os.path.join(
savedir, "level_{}".format(level), uid + "_TissuePatch.npy"
)
os.makedirs(os.path.dirname(out_file), exist_ok=True)
np.save(out_file, tissue_patch.otsu_thresholded)
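# Example invocation of the command above (paths are placeholders and the console
# script is assumed to be installed as `pyvirchow`):
#
#   pyvirchow create-tissue-masks --indir /data/tumor_wsis --level 5 --savedir /data/tissue_masks
#
# This writes one <uid>_TissuePatch.npy boolean mask per slide under
# /data/tissue_masks/level_5/.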
@cli.command(
"create-annotation-masks",
context_settings=CONTEXT_SETTINGS,
help="Extract annotation masks",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option("--jsondir", help="Root directory with all jsons", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_annotation_masks_cmd(indir, jsondir, level, savedir):
"""Extract annotation patches
We assume the masks have already been generated at level say x.
    We also assume the files are arranged in the following hierarchy:
raw data (indir): tumor_wsis/tumor001.tif
json data (jsondir): tumor_jsons/tumor001.json
"""
tumor_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
for tumor_wsi in tqdm(tumor_wsis):
wsi = WSIReader(tumor_wsi, 40)
uid = wsi.uid.replace(".tif", "")
json_filepath = os.path.join(jsondir, uid + ".json")
if not os.path.exists(json_filepath):
print("Skipping {} as annotation json not found".format(uid))
continue
out_dir = os.path.join(savedir, "level_{}".format(level))
wsi.annotation_masked(json_filepath=json_filepath, level=level, savedir=out_dir)
@cli.command(
"extract-tumor-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract tumor patches from tumor WSIs",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option(
"--annmaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--tismaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option(
"--threshold", help="Threshold for a cell to be called tumor", default=0, type=int
)
def extract_tumor_patches_cmd(
indir, annmaskdir, tismaskdir, level, patchsize, stride, savedir, threshold
):
"""Extract tumor only patches from tumor WSIs.
We assume the masks have already been generated at level say x.
    We also assume the files are arranged in the following hierarchy:
raw data (indir): tumor_wsis/tumor001.tif
masks (maskdir): tumor_masks/level_x/tumor001_AnnotationTumorMask.npy';
tumor_masks/level_x/tumor001_AnnotationNormalMask.npy';
We create the output in a similar fashion:
output (outdir): patches/tumor/level_x/tumor001_xcenter_ycenter.png
Strategy:
1. Load tumor annotated masks
2. Load normal annotated masks
3. Do subtraction tumor-normal to ensure only tumor remains.
    Truth table:
        tumor_mask  normal_mask  tumor_for_sure
            1            0             1
            1            1             0
            0            1             0
            0            0             0
"""
tumor_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
# Assume that we want to generate these patches at level 0
# So in order to ensure stride at a lower level
# this needs to be discounted
# stride = int(patchsize / (2**level))
stride = min(int(patchsize / (2 ** level)), 4)
for tumor_wsi in tqdm(tumor_wsis):
last_used_x = None
last_used_y = None
wsi = WSIReader(tumor_wsi, 40)
uid = wsi.uid.replace(".tif", "")
filepath = os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationColored.npy"
)
if not os.path.exists(filepath):
print("Skipping {} as mask not found".format(uid))
continue
normal_mask = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationNormalMask.npy"
)
)
tumor_mask = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationTumorMask.npy"
)
)
tissue_mask = np.load(
os.path.join(tismaskdir, "level_{}".format(level), uid + "_TissuePatch.npy")
)
colored_patch = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationColored.npy"
)
)
subtracted_mask = tumor_mask * 1 - normal_mask * 1
subtracted_mask[np.where(subtracted_mask < 0)] = 0
subtracted_mask = np.logical_and(subtracted_mask, tissue_mask)
x_ids, y_ids = np.where(subtracted_mask)
for x_center, y_center in zip(x_ids, y_ids):
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x_center, y_center, patchsize
)
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_topright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
# print((x_topleft, x_topright, y_topleft, y_bottomright))
mask = subtracted_mask[x_topleft:x_topright, y_topleft:y_bottomright]
# Feed only complete cancer cells
            # Feed if more than 50% of the cells are cancerous!
if threshold <= 0:
threshold = 0.5 * (patchsize * patchsize)
if np.sum(mask) > threshold:
if last_used_x is None:
last_used_x = x_center
last_used_y = y_center
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_center - last_used_x)
diff_y = np.abs(y_center - last_used_y)
if diff_x >= stride and diff_y >= stride:
patch = colored_patch[
x_topleft:x_topright, y_topleft:y_bottomright, :
]
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
last_used_x = x_center
last_used_y = y_center
@cli.command(
"extract-normal-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract normal patches from tumor WSIs",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option(
"--annmaskdir", help="Root directory with all annotation mask WSIs", required=False
)
@click.option(
"--tismaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_normal_patches_cmd(
indir, annmaskdir, tismaskdir, level, patchsize, stride, savedir
):
"""Extract tumor only patches from tumor WSIs.
We assume the masks have already been generated at level say x.
We also assume the files are arranged in the following heirerachy:
raw data (indir): tumor_wsis/tumor001.tif
masks (maskdir): tumor_masks/level_x/tumor001_AnnotationTumorMask.npy';
tumor_masks/level_x/tumor001_AnnotationNormalMask.npy';
We create the output in a similar fashion:
output (outdir): patches/tumor/level_x/tumor001_xcenter_ycenter.png
Strategy:
1. Load tumor annotated masks
2. Load normal annotated masks
3. Do subtraction tumor-normal to ensure only tumor remains.
Truth table:
tumor_mask normal_mask tumour_for_sure
1 0 1
1 1 0
1 1 0
0 1 0
"""
all_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=True)
# Assume that we want to generate these patches at level 0
# So in order to ensure stride at a lower level
# this needs to be discounted
stride = min(int(patchsize / (2 ** level)), 4)
for wsi in tqdm(all_wsis):
last_used_x = None
last_used_y = None
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
tissue_mask = np.load(
os.path.join(tismaskdir, "level_{}".format(level), uid + "_TissuePatch.npy")
)
if "normal" in uid:
# Just extract based on tissue patches
x_ids, y_ids = np.where(tissue_mask)
subtracted_mask = tissue_mask
colored_patch = wsi.get_patch_by_level(0, 0, level)
elif "tumor" in uid or "test" in uid:
if not os.path.isfile(
os.path.join(
annmaskdir,
"level_{}".format(level),
uid + "_AnnotationNormalMask.npy",
)
):
print("Skipping {}".format(uid))
continue
normal_mask = np.load(
os.path.join(
annmaskdir,
"level_{}".format(level),
uid + "_AnnotationNormalMask.npy",
)
)
tumor_mask = np.load(
os.path.join(
annmaskdir,
"level_{}".format(level),
uid + "_AnnotationTumorMask.npy",
)
)
colored_patch = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationColored.npy"
)
)
subtracted_mask = normal_mask * 1 - tumor_mask * 1
subtracted_mask[np.where(subtracted_mask < 0)] = 0
subtracted_mask = np.logical_and(subtracted_mask, tissue_mask)
x_ids, y_ids = np.where(subtracted_mask)
for x_center, y_center in zip(x_ids, y_ids):
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x_center, y_center, patchsize
)
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_topright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
mask = subtracted_mask[x_topleft:x_topright, y_topleft:y_bottomright]
            # Feed if more than 50% of the mask is positive
if np.sum(mask) > 0.5 * (patchsize * patchsize):
if last_used_x is None:
last_used_x = x_center
last_used_y = y_center
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_center - last_used_x)
diff_y = np.abs(y_center - last_used_y)
if diff_x >= stride and diff_y >= stride:
patch = colored_patch[
x_topleft:x_topright, y_topleft:y_bottomright, :
]
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
last_used_x = x_center
last_used_y = y_center
@cli.command(
"patches-from-coords",
context_settings=CONTEXT_SETTINGS,
help="Extract patches from coordinates file",
)
@click.option("--indir", help="Root directory with all WSIs", required=True)
@click.option("--csv", help="Path to csv with coordinates", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_patches_from_coords_cmd(indir, csv, level, patchsize, savedir):
"""Extract patches from coordinates file at a particular level.
Assumption: Coordinates are assumed to be provided at level 0.
"""
patches_to_extract = defaultdict(list)
with open(csv) as fh:
for line in fh:
try:
filename, x0, y0 = line.split(",")
except:
splitted = line.split("_")
# test files have name like test_001
if len(splitted) == 5:
fileprefix, fileid, x0, y0, _ = splitted
filename = "{}_{}".format(fileprefix, fileid)
elif len(splitted) == 4:
filename, x0, y0, _ = splitted
else:
                    raise RuntimeError(
                        "Unable to find a parsable format. Must be 'filename,x0,y0' or 'filename_x0_y0_patchsize'"
                    )
# other files have name like normal001
filename = filename.lower()
x0 = int(x0)
y0 = int(y0)
patches_to_extract[filename].append((x0, y0))
for filename, coordinates in tqdm(list(patches_to_extract.items())):
if "normal" in filename:
filepath = os.path.join(indir, "normal", filename + ".tif")
elif "tumor" in filename:
filepath = os.path.join(indir, "tumor", filename + ".tif")
elif "test" in filename:
filepath = os.path.join(indir, filename + ".tif")
else:
raise RuntimeError("Malformed filename?: {}".format(filename))
wsi = WSIReader(filepath, 40)
uid = wsi.uid.replace(".tif", "")
for x0, y0 in coordinates:
patch = wsi.get_patch_by_level(x0, y0, level, patchsize)
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x0, y0, patchsize
)
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
@cli.command(
"extract-test-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract patches from testing dataset",
)
@click.option("--indir", help="Root directory with all WSIs", required=True)
@click.option(
"--tismaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option(
"--stride",
default=64,
help="Slide windows by this much to get the next [atj]",
required=True,
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_test_patches_cmd(indir, tismaskdir, level, patchsize, stride, savedir):
wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
for wsi in tqdm(wsis):
last_used_y = None
last_used_x = None
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
tissue_mask = np.load(
os.path.join(tismaskdir, "level_{}".format(level), uid + "_TissuePatch.npy")
)
x_ids, y_ids = np.where(tissue_mask)
for x_center, y_center in zip(x_ids, y_ids):
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x_center, y_center, patchsize
)
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_topright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
mask = tissue_mask[x_topleft:x_topright, y_topleft:y_bottomright]
if np.sum(mask) > 0.5 * (patchsize * patchsize):
if last_used_x is None:
last_used_x = x_center
last_used_y = y_center
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_center - last_used_x)
diff_y = np.abs(y_center - last_used_y)
if diff_x >= stride or diff_y >= stride:
colored_patch = wsi.get_patch_by_level(0, 0, level)
patch = colored_patch[
x_topleft:x_topright, y_topleft:y_bottomright, :
]
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
last_used_x = x_center
last_used_y = y_center
@cli.command(
"estimate-patches",
context_settings=CONTEXT_SETTINGS,
help="Estimate number of extractable tumor patches from tumor WSIs",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option("--jsondir", help="Root directory with all jsons", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def estimate_patches_cmd(indir, jsondir, level, patchsize, stride, savedir):
all_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
out_dir = os.path.join(savedir, "level_{}".format(level))
os.makedirs(out_dir, exist_ok=True)
for wsi in tqdm(all_wsis):
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
json_filepath = os.path.join(jsondir, uid + ".json")
if not os.path.exists(json_filepath):
print("Skipping {} as annotation json not found".format(uid))
continue
bounding_boxes = get_annotation_bounding_boxes(json_filepath)
polygons = get_annotation_polygons(json_filepath)
tumor_bb = bounding_boxes["tumor"]
normal_bb = bounding_boxes["normal"]
normal_polygons = polygons["normal"]
tumor_polygons = polygons["tumor"]
polygons_dict = {"normal": normal_polygons, "tumor": tumor_polygons}
rectangles_dict = {"normal": normal_bb, "tumor": tumor_bb}
for polygon_key, polygons in iteritems(polygons_dict):
bb = rectangles_dict[polygon_key]
to_write = ""
            with open(os.path.join(savedir, "{}.txt".format(polygon_key)), "w") as fh:
for rectangle, polygon in zip(bb, polygons):
"""
Sample points from rectangle. We will assume we are sampling the
centers of our patch. So if we sample x_center, y_center
from this rectangle, we need to ensure (x_center +/- patchsize/2, y_center +- patchsize/2)
lie inside the polygon
"""
xmin, ymax = rectangle["top_left"]
xmax, ymin = rectangle["bottom_right"]
path = polygon.get_path()
for x_center in np.arange(xmin, xmax, patchsize):
for y_center in np.arange(ymin, ymax, patchsize):
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_bottomright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
if path.contains_points(
[(x_topleft, y_topleft), (x_bottomright, y_bottomright)]
).all():
to_write = "{}_{}_{}_{}\n".format(
uid, x_center, y_center, patchsize
)
fh.write(to_write)
def process_wsi(data):
wsi, jsondir, patchsize, stride, level, dirs, write_image = data
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
scale_factor = wsi.get_level_scale_factor(level)
json_filepath = os.path.join(jsondir, uid + ".json")
if not os.path.isfile(json_filepath):
return
boxes = get_annotation_bounding_boxes(json_filepath)
polygons = get_annotation_polygons(json_filepath)
polygons_to_exclude = {"tumor": [], "normal": []}
for polygon in polygons["tumor"]:
# Does this have any of the normal polygons inside it?
polygons_to_exclude["tumor"].append(
get_common_interior_polygons(polygon, polygons["normal"])
)
for polygon in polygons["normal"]:
# Does this have any of the tumor polygons inside it?
polygons_to_exclude["normal"].append(
get_common_interior_polygons(polygon, polygons["tumor"])
)
for polygon_key in list(polygons.keys()):
last_used_x = None
last_used_y = None
annotated_polygons = polygons[polygon_key]
annotated_boxes = boxes[polygon_key]
# iterate through coordinates in the bounding rectangle
        # and check if they overlap with any other annotations and
# if not fetch a patch at that coordinate from the wsi
annotation_index = 0
for annotated_polygon, annotated_box in zip(
annotated_polygons, annotated_boxes
):
annotation_index += 1
minx, miny = annotated_box["top_left"]
maxx, miny = annotated_box["top_right"]
maxx, maxy = annotated_box["bottom_right"]
minx, maxy = annotated_box["bottom_left"]
width = int(maxx) - int(minx)
height = int(maxy) - int(miny)
# (minx, miny), width, height = annotated_box['top_left'], annotated_box['top'].get_xy()
# Should scale?
# No. Do not scale here as the patch is always
# fetched from things at level0
minx = int(minx) # * scale_factor)
miny = int(miny) # * scale_factor)
maxx = int(maxx) # * scale_factor)
maxy = int(maxy) # * scale_factor)
width = int(width * scale_factor)
height = int(height * scale_factor)
annotated_polygon = np.array(annotated_polygon.get_xy())
annotated_polygon = annotated_polygon * scale_factor
# buffer ensures the resulting polygon is clean
# http://toblerity.org/shapely/manual.html#object.buffer
try:
annotated_polygon_scaled = shapelyPolygon(
np.round(annotated_polygon).astype(int)
).buffer(0)
except:
warnings.warn(
"Skipping creating annotation index {} for {}".format(
annotation_index, uid
)
)
continue
assert (
annotated_polygon_scaled.is_valid
), "Found invalid annotated polygon: {} {}".format(
uid, shapelyPolygon(annotated_polygon).is_valid
)
for x_left in np.arange(minx, maxx, 1):
for y_top in np.arange(miny, maxy, 1):
x_right = x_left + patchsize
y_bottom = y_top + patchsize
if last_used_x is None:
last_used_x = x_left
last_used_y = y_top
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_left - last_used_x)
diff_y = np.abs(y_top - last_used_y)
# print(last_used_x, last_used_y, x_left, y_top, diff_x, diff_y)
if diff_x <= stride or diff_y <= stride:
continue
else:
last_used_x = x_left
last_used_y = y_top
patch_polygon = shapelyPolygon(
[
(x_left, y_top),
(x_right, y_top),
(x_right, y_bottom),
(x_left, y_bottom),
]
).buffer(0)
assert (
patch_polygon.is_valid
), "Found invalid polygon: {}_{}_{}".format(uid, x_left, y_top)
try:
is_inside = annotated_polygon_scaled.contains(patch_polygon)
except:
# Might raise an exception when the two polygons
# are the same
                        warnings.warn(
                            "Skipping: {}_{}_{}_{}.png | Equals: {} | Almost equals: {}".format(
                                uid,
                                x_left,
                                y_top,
                                patchsize,
                                annotated_polygon_scaled.equals(patch_polygon),
                                annotated_polygon_scaled.almost_equals(patch_polygon),
                            )
                        )
continue
if write_image:
out_file = os.path.join(
dirs[polygon_key],
"{}_{}_{}_{}.png".format(uid, x_left, y_top, patchsize),
)
patch = wsi.get_patch_by_level(x_left, y_top, level, patchsize)
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
else:
# Just write the coordinates
to_write = "{}_{}_{}_{}\n".format(uid, x_left, y_top, patchsize)
out_file = os.path.join(
dirs[polygon_key], "{}.txt".format(polygon_key)
)
with open(out_file, "a") as fh:
fh.write(to_write)
@cli.command(
"extract-test-both-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract both normal and tumor patches from tissue masks",
)
@click.option("--indir", help="Root directory with all test WSIs", required=True)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option("--jsondir", help="Root directory with all jsons", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option("--write_image", help="Should output images", is_flag=True)
def extract_test_both_cmd(
indir, patchsize, stride, jsondir, level, savedir, write_image
):
"""Extract tissue only patches from tumor WSIs.
"""
wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
out_dir = os.path.join(savedir, "level_{}".format(level))
normal_dir = os.path.join(out_dir, "normal")
tumor_dir = os.path.join(out_dir, "tumor")
os.makedirs(out_dir, exist_ok=True)
os.makedirs(normal_dir, exist_ok=True)
os.makedirs(tumor_dir, exist_ok=True)
dirs = {"normal": normal_dir, "tumor": tumor_dir}
total_wsi = len(wsis)
data = [(wsi, jsondir, patchsize, stride, level, dirs, write_image) for wsi in wsis]
with tqdm(total=total_wsi) as pbar:
with Pool(processes=16) as p:
for i, _ in enumerate(p.imap_unordered(process_wsi, data)):
# print(i / total_wsi * 100)
pbar.update()
# for i, wsi in tqdm(enumerate(list(wsis))):
# process_wsi(wsi)
# pbar.update()
def process_segmentation(data):
"""
Parameters
----------
data: tuple
(png_location, tsv_outpath)
"""
png, saveto = data
patch = read_as_rgb(png)
region_properties, _ = label_nuclei(patch, draw=False)
summary = summarize_region_properties(region_properties, patch)
df = pd.DataFrame([summary])
df.to_csv(saveto, index=False, header=True, sep="\t")
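# Illustrative usage sketch (not part of the original module): process_segmentation
# expects a (png_path, tsv_path) tuple, so a single patch can be segmented as below.
# The file names are hypothetical placeholders.
def _example_process_segmentation(png_path="patch_0.png", tsv_path="patch_0.tsv"):
    """Segment one patch and write its nuclei feature summary (sketch only)."""
    process_segmentation((png_path, tsv_path))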
@cli.command(
"segment",
context_settings=CONTEXT_SETTINGS,
help="Performs segmentation and extract-features",
)
@click.option("--indir", help="Root directory with all pngs", required=True)
@click.option("--outdir", help="Output directory to out tsv", required=True)
def segmentation_cmd(indir, outdir):
"""Perform segmentation and store the tsvs
"""
list_of_pngs = list(glob.glob(indir + "/*.png"))
data = []
for f in list_of_pngs:
tsv = f.replace(os.path.dirname(f), outdir).replace(".png", ".tsv")
if not os.path.isfile(tsv):
data.append((f, tsv))
elif os.stat(tsv).st_size == 0:
            data.append((f, tsv))
os.makedirs(outdir, exist_ok=True)
with tqdm(total=len(data)) as pbar:
with Pool(processes=16) as p:
for i, _ in enumerate(p.imap_unordered(process_segmentation, data)):
pbar.update()
def _process_patches_df(data):
slide_path, json_filepath, patch_size, saveto = data
df = get_all_patches_from_slide(
slide_path,
json_filepath=json_filepath,
filter_non_tissue=True,
patch_size=patch_size,
saveto=saveto,
)
return df
@cli.command(
"patches-df",
context_settings=CONTEXT_SETTINGS,
help="Extract all patches summarized as dataframes",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option("--jsondir", help="Root directory with all jsons")
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_mask_df_cmd(indir, jsondir, patchsize, savedir):
"""Extract tissue only patches from tumor WSIs.
"""
wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
data = []
df = pd.DataFrame()
for wsi in wsis:
basename = path_leaf(wsi).replace(".tif", "")
        if jsondir:
            json_filepath = os.path.join(jsondir, basename + ".json")
            if not os.path.isfile(json_filepath):
                json_filepath = None
        else:
            json_filepath = None
saveto = os.path.join(savedir, basename + ".tsv")
data.append((wsi, json_filepath, patchsize, saveto))
os.makedirs(savedir, exist_ok=True)
with tqdm(total=len(wsis)) as pbar:
with Pool(processes=16) as p:
for i, temp_df in enumerate(p.imap_unordered(_process_patches_df, data)):
df = pd.concat([df, temp_df])
pbar.update()
if "is_tumor" in df.columns:
df = df.sort_values(by=["uid", "is_tumor"])
else:
df["is_tumor"] = False
df = df.sort_values(by=["uid"])
df.to_csv(
os.path.join(savedir, "master_df.tsv"), sep="\t", index=False, header=True
)
@cli.command(
"tif-to-df",
context_settings=CONTEXT_SETTINGS,
help="Extract all patches summarized as dataframes from one WSI",
)
@click.option("--tif", help="Tif", required=True)
@click.option("--jsondir", help="Root directory with all jsons")
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_df_from_tif_cmd(tif, jsondir, patchsize, savedir):
"""Extract tissue only patches from tumor WSIs.
"""
basename = path_leaf(tif).replace(".tif", "")
    if jsondir:
        json_filepath = os.path.abspath(os.path.join(jsondir, basename + ".json"))
        if not os.path.isfile(json_filepath):
            json_filepath = None
    else:
        json_filepath = None
saveto = os.path.join(savedir, basename + ".tsv")
df = get_all_patches_from_slide(
tif,
json_filepath=json_filepath,
filter_non_tissue=False,
patch_size=patchsize,
saveto=saveto,
)
@cli.command(
"patch-and-mask",
context_settings=CONTEXT_SETTINGS,
help="Extract all patches and their mask from patches dataframes",
)
@click.option("--df", help="Path to dataframe", required=True)
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option("--savedf", help="Save edited dataframe to", required=True)
def extract_patch_mask_cmd(df, patchsize, savedir, savedf):
"""Extract tissue only patches from tumor WSIs.
"""
assert not os.path.isfile(savedf)
df = pd.read_table(df)
df_copy = df.copy()
df_copy["img_path"] = None
df_copy["mask_path"] = None
df["savedir"] = savedir
df["patch_size"] = patchsize
records = df.reset_index().to_dict("records")
aprun = ParallelExecutor(n_jobs=100)
total = len(records)
returned_data = aprun(total=total)(
delayed(save_images_and_mask)(f) for f in records
)
"""
with tqdm(total=len(df.index)) as pbar:
with Pool(processes=8) as p:
for idx, img_path, mask_path in p.imap_unordered(
save_images_and_mask, records):
df_copy.loc[idx, 'img_path'] = img_path
df_copy.loc[idx, 'mask_path'] = mask_path
pbar.update()
"""
# df_copy.to_csv(savedf, sep='\t', index=False, header=True)
def process_segmentation_both(data):
"""
Parameters
----------
data: tuple
        (is_tissue, is_tumor, pickle_file, savetopng, savetodf)
"""
is_tissue, is_tumor, pickle_file, savetopng, savetodf = data
if not is_tissue:
df = pd.DataFrame()
df["is_tumor"] = is_tumor
df["is_tissue"] = is_tissue
return df
patch = joblib.load(pickle_file)
region_properties, _ = label_nuclei(patch, draw=False) # savetopng=savetopng)
summary = summarize_region_properties(region_properties, patch)
df = pd.DataFrame([summary])
df["is_tumor"] = is_tumor
df.to_csv(savetodf, index=False, header=True, sep="\t")
return df
@cli.command(
"segment-from-df", context_settings=CONTEXT_SETTINGS, help="Segment from df"
)
@click.option("--df", help="Path to dataframe", required=True)
@click.option("--finaldf", help="Path to dataframe", required=True)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def process_df_cmd(df, finaldf, savedir):
df = pd.read_table(df)
df["img_path"] = None
df["mask_path"] = None
modified_df = pd.DataFrame()
os.makedirs(savedir, exist_ok=True)
tile_loc = df.tile_loc.astype(str)
tile_loc = tile_loc.str.replace(" ", "").str.replace(")", "").str.replace("(", "")
df[["row", "col"]] = tile_loc.str.split(",", expand=True)
df["segmented_png"] = (
savedir
+ "/"
+ df[["uid", "row", "col"]].apply(lambda x: "_".join(x.values.tolist()), axis=1)
+ ".segmented.png"
)
df["segmented_tsv"] = (
savedir
+ "/"
+ df[["uid", "row", "col"]].apply(lambda x: "_".join(x.values.tolist()), axis=1)
+ ".segmented.tsv"
)
with tqdm(total=len(df.index)) as pbar:
with Pool(processes=8) as p:
for i, temp_df in enumerate(
p.imap_unordered(
process_segmentation_both,
df[
[
"is_tissue",
"is_tumor",
"img_path",
"segmented_png",
"segmented_tsv",
]
].values.tolist(),
)
):
modified_df = pd.concat([modified_df, temp_df])
pbar.update()
modified_df.to_csv(finaldf, sep="\t", index=False, header=True)
def process_segmentation_fixed(batch_sample):
patch_size = batch_sample["patch_size"]
savedir = os.path.abspath(batch_sample["savedir"])
tile_loc = batch_sample["tile_loc"] # [::-1]
segmentedmethod = batch_sample["segmented_method"]
if isinstance(tile_loc, six.string_types):
tile_row, tile_col = eval(tile_loc)
else:
tile_row, tile_col = tile_loc
segmented_img_path = os.path.join(
savedir, batch_sample["uid"] + "_{}_{}.segmented.png".format(tile_row, tile_col)
)
segmented_tsv_path = os.path.join(
savedir,
batch_sample["uid"] + "_{}_{}.segmented_summary.tsv".format(tile_row, tile_col),
)
if os.path.isfile(segmented_img_path) and os.path.isfile(segmented_tsv_path):
df = pd.read_table(segmented_tsv_path)
return batch_sample["index"], segmented_img_path, segmented_tsv_path, df
# the get_tile tuple required is (col, row)
if not os.path.isfile(batch_sample["img_path"]):
save_images_and_mask(batch_sample)
patch = joblib.load(batch_sample["img_path"])
region_properties, _ = label_nuclei(
patch, draw=False, normalization=segmentedmethod
) # , savetopng=segmented_img_path)
summary = summarize_region_properties(region_properties, patch)
df = pd.DataFrame([summary])
try:
df["is_tumor"] = batch_sample["is_tumor"]
except KeyError:
# Must be from a normal sample
df["is_tumor"] = False
df["is_tissue"] = batch_sample["is_tissue"]
df.to_csv(segmented_tsv_path, index=False, header=True, sep="\t")
return batch_sample["index"], segmented_img_path, segmented_tsv_path, df
@cli.command(
"segment-from-df-fast", context_settings=CONTEXT_SETTINGS, help="Segment from df"
)
@click.option("--df", help="Path to dataframe", required=True)
@click.option("--finaldf", help="Path to dataframe", required=True)
@click.option(
"--segmethod",
help="Path to dataframe",
default=None,
type=click.Choice(["None", "vahadane", "macenko", "xu"]),
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option("--ncpu", type=int, default=1, help="Patch size which to extract patches")
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
def process_df_cmd_fast(df, finaldf, segmethod, savedir, ncpu, patchsize):
savedir = os.path.abspath(savedir)
df_main = pd.read_table(df)
df = df_main.copy()
df["savedir"] = savedir
df["patch_size"] = patchsize
df["segmented_png"] = None
df["segmented_tsv"] = None
df["segmented_method"] = segmethod
modified_df = pd.DataFrame()
os.makedirs(savedir, exist_ok=True)
df_reset_index = df.reset_index()
df_subset = df_reset_index[df_reset_index.is_tissue == True]
records = df_subset.to_dict("records")
with tqdm(total=len(df_subset.index)) as pbar:
if ncpu > 1:
with Pool(processes=ncpu) as p:
for idx, segmented_png, segmented_tsv, summary_df in p.imap_unordered(
process_segmentation_fixed, records
):
df.loc[idx, "segmented_png"] = segmented_png
df.loc[idx, "segmented_tsv"] = segmented_tsv
                    summary_df["index"] = idx
                    modified_df = pd.concat([modified_df, summary_df])
pbar.update()
else:
for idx, row in df.iterrows():
row["index"] = idx
_, segmented_png, segmented_tsv, summary_df = process_segmentation_fixed(
row
)
df.loc[idx, "segmented_png"] = segmented_png
df.loc[idx, "segmented_tsv"] = segmented_tsv
                summary_df["index"] = idx
                modified_df = pd.concat([modified_df, summary_df])
pbar.update()
modified_df = modified_df.set_index("index")
modified_df.to_csv(finaldf, sep="\t", index=False, header=True)
df.to_csv(
finaldf.replace(".tsv", "") + ".segmented.tsv",
sep="\t",
index=False,
header=True,
)
def predict_batch_from_model(patches, model):
"""Predict which pixels are tumor.
Parameters
----------
patch: batch_sizex256x256x3, rgb image
model: keras model
Returns
--------
output: prediction: 256x256x1, per-pixel tumor probability
"""
predictions = model.predict(patches)
predictions = predictions[:, :, :, 1]
return predictions
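# Illustrative shape sketch (assumption: `model` is the Keras model loaded elsewhere
# in this module, taking (N, 256, 256, 3) RGB patches and returning a 2-channel
# per-pixel softmax). This helper only documents the expected input/output shapes.
def _example_predict_batch_shapes(model):
    dummy_batch = np.zeros((4, 256, 256, 3), dtype=np.float32)  # four blank RGB patches
    preds = predict_batch_from_model(dummy_batch, model)
    assert preds.shape == (4, 256, 256)  # one 256x256 tumor-probability map per patch
    return preds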
def predict_from_model(patch, model):
"""Predict which pixels are tumor.
"""
prediction = model.predict(patch.reshape(1, 256, 256, 3))
prediction = prediction[:, :, :, 1].reshape(256, 256)
return prediction
@cli.command(
"heatmap",
context_settings=CONTEXT_SETTINGS,
help="Create tumor probability heatmap",
)
@click.option("--indir", help="Root directory with all WSIs", required=True)
@click.option("--jsondir", help="Root directory with all jsons", required=False)
@click.option(
"--imgmaskdir",
help="Directory where the patches and mask are stored",
default="/Z/personal-folders/interns/saket/github/pyvirchow/data/patch_img_and_mask/",
)
@click.option(
"--modelf",
help="Root directory with all jsons",
default="/Z/personal-folders/interns/saket/github/pyvirchow/notebooks/weights-improvement-12-0.98.hdf",
)
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option("--gpu", type=str, default="0", help="Which GPU to use?")
@click.option(
"--gpumemfrac", type=float, default=1, help="Fraction of GPU memory to use"
)
def create_tumor_map_cmd(
indir, jsondir, imgmaskdir, modelf, patchsize, savedir, gpu, gpumemfrac
):
"""Extract probability maps for a WSI
"""
batch_size = 32
img_mask_dir = imgmaskdir
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = gpumemfrac
config.gpu_options.visible_device_list = gpu
set_session(tf.Session(config=config))
from keras.models import load_model
if not os.path.isfile(indir):
wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
else:
wsis = [indir]
os.makedirs(savedir, exist_ok=True)
model = load_model(modelf)
for wsi in tqdm(sorted(wsis)):
basename = path_leaf(wsi).replace(".tif", "")
# if basename!= 'tumor_110':
# continue
if jsondir:
json_filepath = os.path.join(jsondir, basename + ".json")
if not os.path.isfile(json_filepath):
json_filepath = None
else:
json_filepath = None
saveto = os.path.join(savedir, basename + ".joblib.pickle")
saveto_original = os.path.join(savedir, basename + ".original.joblib.pickle")
if os.path.isfile(saveto):
# all done so continue
continue
all_samples = get_all_patches_from_slide(wsi, json_filepath, False, patchsize)
if "img_path" not in all_samples.columns:
assert (
imgmaskdir is not None
), "Need to provide directory if img_path column is missing"
tile_loc = all_samples.tile_loc.astype(str)
tile_loc = (
tile_loc.str.replace(" ", "").str.replace(")", "").str.replace("(", "")
)
all_samples[["row", "col"]] = tile_loc.str.split(",", expand=True)
all_samples["img_path"] = (
img_mask_dir
+ "/"
+ all_samples[["uid", "row", "col"]].apply(
lambda x: "_".join(x.values.tolist()), axis=1
)
+ ".img.joblib.pickle"
)
all_samples["mask_path"] = (
img_mask_dir
+ "/"
+ all_samples[["uid", "row", "col"]].apply(
lambda x: "_".join(x.values.tolist()), axis=1
)
+ ".mask.joblib.pickle"
)
if not os.path.isfile("/tmp/white.img.pickle"):
white_img = np.ones([patchsize, patchsize, 3], dtype=np.uint8) * 255
joblib.dump(white_img, "/tmp/white.img.pickle")
# Definitely not a tumor and hence all black
if not os.path.isfile("/tmp/white.mask.pickle"):
white_img_mask = np.ones([patchsize, patchsize], dtype=np.uint8) * 0
joblib.dump(white_img_mask, "/tmp/white.mask.pickle")
all_samples.loc[
all_samples.is_tissue == False, "img_path"
] = "/tmp/white.img.pickle"
all_samples.loc[
all_samples.is_tissue == False, "mask_path"
] = "/tmp/white.mask.pickle"
for idx, row in all_samples.iterrows():
f = row["img_path"]
if not os.path.isfile(f):
row["savedir"] = imgmaskdir
row["patch_size"] = patchsize
row["index"] = idx
save_images_and_mask(row)
print(all_samples.head())
slide = WSIReader(wsi, 40)
n_cols = int(slide.dimensions[0] / patchsize)
n_rows = int(slide.dimensions[1] / patchsize)
n_samples = len(all_samples.index)
assert n_rows * n_cols == len(all_samples.index), "Some division error;"
print("Total: {}".format(len(all_samples.index)))
predicted_thumbnails = list()
for offset in tqdm(list(range(0, n_samples, batch_size))):
batch_samples = all_samples.iloc[offset : offset + batch_size]
X, _ = next(generate_tiles_fast(batch_samples, batch_size, shuffle=False))
if (
batch_samples.is_tissue.nunique() == 1
and batch_samples.iloc[0].is_tissue == False
):
# all patches in this row do not have tissue, skip them all
predicted_thumbnails.append(np.zeros(batch_size, dtype=np.float32))
else:
# make predictions
preds = predict_batch_from_model(X, model)
predicted_thumbnails.append(preds.mean(axis=(1, 2)))
predicted_thumbnails = np.asarray(predicted_thumbnails)
try:
output_thumbnail_preds = predicted_thumbnails.reshape(n_rows, n_cols)
joblib.dump(output_thumbnail_preds, saveto)
except:
# Going to reshape
flattened = predicted_thumbnails.ravel()[: n_rows * n_cols]
print(
"n_row = {} | n_col = {} | n_rowxn_col = {} | orig_shape = {} | flattened: {}".format(
n_rows,
n_cols,
n_rows * n_cols,
np.prod(predicted_thumbnails.shape),
flattened.shape,
)
)
output_thumbnail_preds = flattened.reshape(n_rows, n_cols)
joblib.dump(output_thumbnail_preds, saveto)
slide.close()
continue
slide.close()
def generate_rows(samples, num_samples, batch_size=1):
while True: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples.iloc[offset : offset + batch_size]
# is_tissue = batch_samples.is_tissue.tolist()
# is_tumor = batch_samples.is_tumor.astype('int32').tolist()
features = []
labels = []
# batch_samples = batch_samples.copy().drop(columns=['is_tissue', 'is_tumor'])
for _, batch_sample in batch_samples.iterrows():
row = batch_sample.values
try:
label = int(batch_sample.is_tumor)
except AttributeError:
# Should be normal
label = 0
if batch_sample.is_tissue:
feature = pd.read_table(
os.path.join(
"/Z/personal-folders/interns/saket/github/pyvirchow",
batch_sample.segmented_tsv,
)
)
# feature = feature.drop(columns=['is_tumor', 'is_tissue'])
try:
feature = feature.loc[:, COLUMNS]
values = feature.loc[0].values
assert len(feature.columns) == 46
except KeyError:
# the segmentation returned empty columns!?
print(batch_sample.segmented_tsv)
# print(feature.columns)
# raise RuntimeError('Cannot parse the columns')
values = [0.0] * 46
features.append(values)
else:
values = [0.0] * 46
features.append(values)
labels.append(label)
X_train = np.array(features, dtype=np.float32)
y_train = np.array(labels)
yield X_train, y_train
@cli.command(
"heatmap-rf",
context_settings=CONTEXT_SETTINGS,
help="Create tumor probability heatmap using random forest model",
)
@click.option("--tif", help="Tif", required=True)
@click.option("--df", help="Root directory with all WSIs", required=True)
@click.option(
"--modelf",
help="Root directory with all jsons",
default="/Z/personal-folders/interns/saket/github/pyvirchow/models/random_forest_all_train.tf.model.meta",
)
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def create_tumor_map_rf_cmd(tif, df, modelf, patchsize, savedir):
"""Extract probability maps for a WSI
"""
batch_size = 1
    all_samples = pd.read_table(df)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/1/26 13:10
Desc: Shenwan (SW) indexes - first-, second- and third-level industry indexes
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
    Shenwan market-representation indexes - real-time quotes
    http://www.swsindex.com/idx0120.aspx?columnid=8831
    :return: real-time quotes of the market-representation indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
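# Illustrative usage sketch: the function takes no arguments, performs a live HTTP
# request to swsindex.com and returns a DataFrame whose columns are the Chinese
# field names assigned above; network access is required.
def _example_sw_index_representation_spot() -> pd.DataFrame:
    quotes_df = sw_index_representation_spot()
    print(quotes_df.head())
    return quotes_df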
def sw_index_spot() -> pd.DataFrame:
"""
    Shenwan first-level (Level 1) industry indexes - real-time quotes
    http://www.swsindex.com/idx0120.aspx?columnid=8832
    :return: real-time quotes of the first-level industry indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_second_spot() -> pd.DataFrame:
"""
    Shenwan second-level (Level 2) industry indexes - real-time quotes
    http://www.swsindex.com/idx0120.aspx?columnId=8833
    :return: real-time quotes of the second-level industry indexes
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 6):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801011','801012','801013','801014','801015','801016','801021','801022','801023','801032','801033','801034','801035','801036','801037','801041','801051','801072','801073','801074','801075','801081','801082','801083','801084','801092','801093','801094','801101','801102','801111','801112','801123','801131','801132','801141','801142','801143','801151','801152','801153','801154','801155','801156','801161','801162','801163','801164','801171','801172','801173','801174','801175','801176','801177','801178','801181','801182','801191','801192','801193','801194','801202','801211','801212','801213','801214','801222','801223','801053','801054','801055','801076','801203','801204','801205','801711','801712','801713','801721','801722','801723','801724','801725','801731','801732','801733','801734','801741','801742','801743','801744','801751','801752','801761','801881','801017','801018')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "98",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_ | numeric(temp_df["最低价"]) | pandas.to_numeric |
import sys
from calendar import month_name
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.model_selection import TimeSeriesSplit, StratifiedKFold, GroupKFold, KFold
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from dateutil.relativedelta import relativedelta
from collections import Counter, defaultdict
from utils import *
def stratified_group_k_fold(X, y, groups, k, seed=2021):
labels_num = np.max(y) + 1
y_counts_per_group = defaultdict(lambda: np.zeros(labels_num))
y_distr = Counter()
for label, g in zip(y, groups):
y_counts_per_group[g][label] += 1
y_distr[label] += 1
y_counts_per_fold = defaultdict(lambda: np.zeros(labels_num))
groups_per_fold = defaultdict(set)
def eval_y_counts_per_fold(y_counts, fold):
y_counts_per_fold[fold] += y_counts
std_per_label = []
for label in range(labels_num):
label_std = np.std([y_counts_per_fold[i][label] / y_distr[label] for i in range(k)])
std_per_label.append(label_std)
y_counts_per_fold[fold] -= y_counts
return np.mean(std_per_label)
groups_and_y_counts = list(y_counts_per_group.items())
random.Random(seed).shuffle(groups_and_y_counts)
for g, y_counts in sorted(groups_and_y_counts, key=lambda x: -np.std(x[1])):
best_fold = None
min_eval = None
for i in range(k):
fold_eval = eval_y_counts_per_fold(y_counts, i)
if min_eval is None or fold_eval < min_eval:
min_eval = fold_eval
best_fold = i
y_counts_per_fold[best_fold] += y_counts
groups_per_fold[best_fold].add(g)
all_groups = set(groups)
for i in range(k):
train_groups = all_groups - groups_per_fold[i]
test_groups = groups_per_fold[i]
train_indices = [i for i, g in enumerate(groups) if g in train_groups]
test_indices = [i for i, g in enumerate(groups) if g in test_groups]
yield train_indices, test_indices
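# Illustrative usage sketch: stratified_group_k_fold yields positional train/test
# indices while keeping every group in a single fold and the label distribution
# roughly balanced across folds. The toy labels and group ids below are placeholders.
def _example_stratified_group_k_fold():
    y = np.array([0, 0, 1, 1, 0, 1, 0, 1])
    groups = np.array(["g1", "g1", "g2", "g2", "g3", "g3", "g4", "g4"])
    X = np.zeros((len(y), 2))  # X is accepted for API symmetry but not used internally
    for fold, (train_idx, test_idx) in enumerate(
        stratified_group_k_fold(X, y, groups, k=2, seed=0)
    ):
        print(fold, sorted(set(groups[train_idx])), sorted(set(groups[test_idx])))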
class StratifiedKFoldWithGroupID():
def __init__(self, group_id_col, stratified_target_id, n_splits, random_state=2021, *args, **kwargs):
super().__init__(*args, **kwargs)
self.group_id_col = group_id_col
self.stratified_target_id = stratified_target_id
self.n_splits = n_splits
self.random_state = random_state
def split(self, _df_X, _se_y, _group, *args, **kwargs):
df = _df_X[[self.group_id_col]]
if isinstance(_se_y, pd.DataFrame):
df["group_target"] = _se_y[self.stratified_target_id]
else:
df["group_target"] = _se_y
df["group_target"] = procLabelEncToSeries(df["group_target"])
df = df.reset_index()
for train_idx, test_idx in stratified_group_k_fold(X=df, y=df["group_target"].values, groups=df[self.group_id_col].values, k=self.n_splits, seed=self.random_state):
print(f"fold train :{df.iloc[train_idx]['group_target'].value_counts()}")
print(f"fold valid :{df.iloc[test_idx]['group_target'].value_counts()}")
#pdb.set_trace()
yield train_idx, test_idx
class ssl1Fold():
def __init__(self):
self.n_splits = 1#n_splits
def split(self, _df_X, _df_y, _group, *args, **kwargs):
df = _df_X.reset_index()
for i in range(1):
train_idx = df.index
test_idx = df.loc[df["train_flag"]==1].index
yield train_idx, test_idx
class DummyKfold():
def __init__(self, n_splits, random_state):
self.n_splits = n_splits
self.random_state = random_state
def split(self, _df_X, _df_y, _group, *args, **kwargs):
# num_data = len(_df_X)
# idx_list = np.arange(num_data)
# kf = KFold(n_splits=self.n_splits, random_state=self.random_state, shuffle=True)
for i in range(self.n_splits):
# print("TRAIN:", train_index, "TEST:", test_index)
# yield train_index, test_index
yield [0], [0]
class SeasonKFold():
"""時系列情報が含まれるカラムでソートした iloc を返す KFold"""
def __init__(self, n_splits, ts_column="Season", clipping=False, num_seasons=5,*args, **kwargs):
super().__init__(*args, **kwargs)
        # Name of the column holding the time-series information
        self.ts_column = ts_column
        # Flag to clip the returned index lists to the length of the smallest fold seen so far
self.clipping = clipping
self.num_seasons = num_seasons
self.n_splits = n_splits
def split(self, X, *args, **kwargs):
        # The input data is assumed to be a DataFrame
        assert isinstance(X, pd.DataFrame)
        # Initial lengths used when clipping is enabled
        train_fold_min_len, test_fold_min_len = sys.maxsize, sys.maxsize
        # Extract the time-series column
        ts = X[self.ts_column]
        # Reset the original index so the values can be used as iloc positions (0, 1, 2, ...)
ts_df = ts.reset_index()
ts_list = sorted(ts_df[self.ts_column].unique())
for i in range(self.n_splits):
            # Convert the indices into values usable as iloc on the original DataFrame
train_list = ts_list[:-self.num_seasons]
test_list= ts_list[-self.num_seasons:]
print(f"train season: {train_list}")
print(f"test season: {test_list}")
#pdb.set_trace()
train_iloc_index = ts_df.loc[ts_df[self.ts_column].isin(train_list)].index
test_iloc_index = ts_df.loc[ts_df[self.ts_column].isin(test_list)].index
ts_list = train_list
if self.clipping:
                # Assumes the folds returned by TimeSeriesSplit.split() grow progressively larger
train_fold_min_len = min(train_fold_min_len, len(train_iloc_index))
test_fold_min_len = min(test_fold_min_len, len(test_iloc_index))
yield list(train_iloc_index[-train_fold_min_len:]), list(test_iloc_index[-test_fold_min_len:])
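# Illustrative usage sketch for SeasonKFold: each split holds out the most recent
# `num_seasons` seasons as validation and then walks further back in time on the
# next split. The synthetic frame below is a placeholder.
def _example_season_kfold():
    df = pd.DataFrame({"Season": np.repeat(np.arange(2003, 2018), 10), "x": 0.0})
    cv = SeasonKFold(n_splits=2, num_seasons=5)
    for train_idx, test_idx in cv.split(df):
        print(sorted(df.iloc[train_idx]["Season"].unique())[-1],
              sorted(df.iloc[test_idx]["Season"].unique()))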
class TournamentGroupKFold(GroupKFold):
def __init__(self, group_id_col="Season", day_num_col="DayNum",tournament_start_daynum=133, *args, **kwargs):
super().__init__(*args, **kwargs)
self.group_id_col = group_id_col
self.day_num_col = day_num_col
self.tournament_start_daynum = tournament_start_daynum
def split(self, _df_X, _df_y, _group, *args, **kwargs):
df_tournament = _df_X.loc[_df_X[self.day_num_col]>=self.tournament_start_daynum]
df_tournament_y = _df_y.loc[df_tournament.index]
df_reg = _df_X.loc[~_df_X.index.isin(df_tournament.index)]
reg_index = _df_X.index.get_indexer(df_reg.index).tolist()
if _group is None:
_group = df_tournament[self.group_id_col]
for train_id_index, test_id_index in super().split(df_tournament, df_tournament_y, _group):
# print(f"train_id_index:{train_id_index}")
# print(f"test_id_index:{test_id_index}")
# print(f"train season: {df_tournament.iloc[train_id_index]['Season'].unique()}")
# print(f"test season: {df_tournament.iloc[test_id_index]['Season'].unique()}")
train_set_ID = df_tournament.iloc[train_id_index].index
test_set_ID = df_tournament.iloc[test_id_index].index
train_index_list = _df_X.index.get_indexer(train_set_ID).tolist()
test_index_list = _df_X.index.get_indexer(test_set_ID).tolist()
yield train_index_list+reg_index, test_index_list
class VirtulTimeStampSplit():
def __init__(self, n_splits, num_valid=2500000):
self.num_valid = num_valid
self.n_splits = n_splits
def split_melt(self, _df_X, _df_y, _group):
cpu_stats(f"in split")
row_id_sequence = _df_X.index.tolist()
row_id_list = _df_X.index.unique().tolist()
#print(row_id_list)
all_train_idx = range(len(_df_X))
print(f"row_id_sequence : {sys.getsizeof(row_id_sequence)}")
print(f"row_id_list : {sys.getsizeof(row_id_list)}")
print(f"all_train_idx : {sys.getsizeof(all_train_idx)}")
cpu_stats(f"after creating all_train_idx")
for n in range(self.n_splits):
valid_row_id_list = row_id_list[-self.num_valid:]
first_valid_row_id = valid_row_id_list[0]
valid_id_from = row_id_sequence.index(first_valid_row_id)
valid = all_train_idx[valid_id_from:]
cpu_stats(f"mid yield")
row_id_list = row_id_list[:-self.num_valid]
all_train_idx = all_train_idx[:valid_id_from]#_df_X.index.get_loc(row_id_list)
print(f"fold : {n}")
print(f"train : {len(all_train_idx)}, {all_train_idx}")
print(f"valid : {len(valid)}, {valid}")
cpu_stats(f"before yield")
yield all_train_idx, valid
def split(self, _df_X, _df_y, _group):
all_train_idx = range(len(_df_X))
for n in range(self.n_splits):
valid = all_train_idx[-self.num_valid:]
all_train_idx = all_train_idx[:-self.num_valid]
print(f"fold : {n}")
print(f"train : {len(all_train_idx)}, {all_train_idx}")
print(f"valid : {len(valid)}, {valid}")
yield all_train_idx, valid
def return_debug_index(debug, _train_idx, _val_idx, rate=0.5):
if debug:
train_idx = random.sample(_train_idx, int(len(_train_idx)*rate))
val_idx = random.sample(_val_idx, int(len(_val_idx)*rate))
else:
train_idx = _train_idx
val_idx = _val_idx
return train_idx, val_idx
class myGroupKFold(GroupKFold):
def __init__(self, group_id_col, cut_uout_flag, debug, *args, **kwargs):
super().__init__(*args, **kwargs)
self.group_id_col = group_id_col
self.cut_uout_flag = cut_uout_flag
self.debug = debug
def split(self, _df_X, _df_y, _group=None, *args, **kwargs):
df_X = _df_X.reset_index()
#if _group is None:
group = df_X[self.group_id_col]
for train_index, val_index in super().split(df_X, _df_y, group):
if self.debug:
train_gp_id_list = list(df_X.iloc[train_index][self.group_id_col].unique())
val_gp_id_list = list(df_X.iloc[val_index][self.group_id_col].unique())
train_gp_id_list, val_gp_id_list = return_debug_index(self.debug, train_gp_id_list, val_gp_id_list, rate=0.5)
train_index = df_X.loc[df_X[self.group_id_col].isin(train_gp_id_list)].index
val_index = df_X.loc[df_X[self.group_id_col].isin(val_gp_id_list)].index
#.set_trace()
if self.cut_uout_flag:
df_train = df_X.iloc[train_index]
new_train_index = df_train.loc[df_train["u_out"]==0].index
#new_train_index=train_index
df_val = df_X.iloc[val_index]
new_val_index = df_val.loc[df_val["u_out"]==0].index
print(f"train: {df_X.loc[train_index, 'u_out'].value_counts()}")
print(f"test: {df_X.loc[val_index, 'u_out'].value_counts()}")
print(f"test u_out 0 : {df_X.loc[new_val_index, 'u_out'].value_counts()}")
yield new_train_index, new_val_index
else:
yield train_index, val_index
class TimeStampNewUserSplit(GroupKFold):
def __init__(self, group_id_col, new_user_head_num=10, old_user_tail_num=10, *args, **kwargs):
super().__init__(*args, **kwargs)
self.group_id_col = group_id_col
self.new_user_head_num = new_user_head_num
self.old_user_tail_num = old_user_tail_num
def split(self, _df_X, _df_y, _group, *args, **kwargs): # we assume df has already been sorted by timestamp
df = _df_X[[self.group_id_col]]
df[_df_y.columns] = _df_y
df = df.reset_index()
if _group is None:
_group = df[self.group_id_col]
fold_idx=1
for old_id_index, new_id_index in super().split(df[self.group_id_col], df[_df_y.columns], _group):
test_new_user_index = set(df.iloc[new_id_index].groupby(self.group_id_col).head(self.new_user_head_num).index)
test_old_user_index = set(df.iloc[old_id_index].groupby(self.group_id_col).tail(self.old_user_tail_num).index)
#print(f"test_new_user : {len(test_new_user)}, test_old_user : {len(test_old_user)}")
#print(f"test_new_user : {len(test_new_user_index)}, test_old_user : {len(test_old_user_index)}")
#print(f"train_old_user_index ; {len(train_old_user_index)}, add : {len(train_old_user_index) + len(test_old_user_index)}")
#print(f"old_id_index : {len(old_id_index)}, new_id_index : {len(new_id_index)}")
#print( df.iloc[new_id_index].groupby(self.group_id_col).head(self.new_user_head_num))
#print(f"{df.iloc[test_old_user_index].groupby(self.group_id_col).count()}")
cpu_stats(f"TimeStampNewUserSplit")
fold_idx+=1
yield list(set(old_id_index) - test_old_user_index), list(test_new_user_index|test_old_user_index)
class myStratifiedKFold(StratifiedKFold):
def __init__(self, stratified_col, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stratified_col = stratified_col
def split(self, _df_X, _df_y, dummy_group, *args, **kwargs):
if self.stratified_col in _df_y.columns:
df_y = _df_y[[self.stratified_col]]
else: #self.stratified_col in _df_X.columns:
df_y = _df_X[[self.stratified_col]]
df_y = df_y.reset_index()
for train_id_index, test_id_index in super().split(_df_X, df_y[self.stratified_col]):
print(f"fold train :{train_id_index}")
print(f"{df_y.iloc[train_id_index][self.stratified_col].value_counts()}")
print(f"fold valid :{test_id_index}")
print(f"{df_y.iloc[test_id_index][self.stratified_col].value_counts()}")
# train_label_list = df_y.iloc[train_id_index][self.stratified_col].unique()
# test_label_list = df_y.iloc[test_id_index][self.stratified_col].unique()
# only_test_list = list(set(test_label_list) - set(train_label_list))
# if len(only_test_list)>0:
# only_test_index = list(df_y.loc[df_y[self.stratified_col].isin(only_test_list)].index)
# train_id_index = np.array(train_id_index.tolist()+only_test_index)
# for num in only_test_index:
# new_test_id_index = test_id_index.tolist()
# new_test_id_index.remove(num)
# test_id_index = np.array(new_test_id_index)
yield train_id_index, test_id_index
class siteStratifiedPathGroupKFold():
def __init__(self, df_test, group_id_col, stratified_target_id, n_splits):
self.group_id_col = group_id_col
self.stratified_target_id = stratified_target_id
self.n_splits = n_splits
self.df_test_info = df_test.groupby(self.stratified_target_id)[self.group_id_col].agg(["count", "nunique"])
def split(self, _df_X, _se_y, _group, *args, **kwargs):
df_train = _df_X[[self.group_id_col, self.stratified_target_id]]
df_train = df_train.reset_index()
df_train["fold"]=self.n_splits
for site_id, row in self.df_test_info.iterrows():
count = row["count"]
nunique = row["nunique"]
path_list = df_train.loc[df_train[self.stratified_target_id]==site_id, self.group_id_col].unique()
random.shuffle(path_list)
path_set_list= [t for t in zip(*[iter(path_list)]*nunique)]
diff_dict = {}
for i, path_set in enumerate(path_set_list):
#print(f"{i} : {path_set}")
train_path_count = df_train.loc[df_train[self.group_id_col].isin(path_set), self.group_id_col].count()
diff_count = abs(train_path_count-count)
diff_dict[i] = diff_count
sort_i_list = sorted(diff_dict.items(), key=lambda x:x[1])
#print(sort_i_list)
for k in range(self.n_splits):
sort_i = sort_i_list[k][0]
path_set =path_set_list[sort_i]
df_train.loc[df_train[self.group_id_col].isin(path_set), "fold"] = k
##print(f"{sort_i}, {sort_i_list[k]}")
#print(f"df_train fold k : {df_train.loc[df_train['fold']==k].shape}")
#pdb.set_trace()
for k in range(self.n_splits):
#df_fold_t = df_train.loc[df_train["fold"]==k, ["site_id", "path"]]
#print(f"fold {k}:")
#print(df_fold_t.groupby("site_id")["path"].agg(["count", "nunique"]))
yield list(df_train.loc[df_train["fold"]!=k].index), list(df_train.loc[df_train["fold"]==k].index)
class myStratifiedKFoldWithGroupID(StratifiedKFold):
def __init__(self, group_id_col, stratified_target_id, *args, **kwargs):
super().__init__(*args, **kwargs)
self.group_id_col = group_id_col
self.stratified_target_id = stratified_target_id
def split(self, _df_X, _se_y, _group, *args, **kwargs):
#_df_X["group_target"] = _se_y
df = _df_X[[self.group_id_col, self.stratified_target_id]]
#df["group_target"] = _se_y
df = df.reset_index()
gp = df.groupby(self.group_id_col)[self.stratified_target_id].apply(lambda x:x.mode()[0])
#print(gp)
#print(gp.index)
#del df
#gc.collect()
fold_idx=1
for train_id_index, test_id_index in super().split(gp.index, gp, _group):
#print(f"fold_idx : {fold_idx}")
#print(f"train_id_index : {train_id_index}, test_id_index : {test_id_index}")
#print(f"train_id : {gp.index[train_id_index]}, test_id : {gp.index[test_id_index]}")
train_id_list = list(gp.index[train_id_index])
test_id_list = list(gp.index[test_id_index])
print(f"fold train :{df.loc[df[self.group_id_col].isin(train_id_list), self.stratified_target_id].value_counts()}")
print(f"fold valid :{df.loc[df[self.group_id_col].isin(test_id_list), self.stratified_target_id].value_counts()}")
#print(f"train_seq_id : {df.loc[df[self.group_id_col].isin(train_id_list)].index}, test_id : {df.loc[df[self.group_id_col].isin(test_id_list)].index}")
fold_idx+=1
yield list(df.loc[df[self.group_id_col].isin(train_id_list)].index), list(df.loc[df[self.group_id_col].isin(test_id_list)].index)
# class StratifiedKFoldWithGroupID(StratifiedKFold):
# def __init__(self, group_id_col, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.group_id_col = group_id_col
# def split(self, _df_X, _se_y, _group, *args, **kwargs):
# #_df_X["group_target"] = _se_y
# df = _df_X[[self.group_id_col]]
# df["group_target"] = _se_y
# df["group_target"] = procLabelEncToSeries(df["group_target"])
# df = df.reset_index()
# gp = df.groupby(self.group_id_col)["group_target"].apply(lambda x:x.mode()[0])
# #print(gp)
# #pdb.set_trace()
# #print(gp.index)
# #del df
# #gc.collect()
# fold_idx=1
# for train_id_index, test_id_index in super().split(gp.index, gp, _group):
# #print(f"fold_idx : {fold_idx}")
# #print(f"train_id_index : {train_id_index}, test_id_index : {test_id_index}")
# #print(f"train_id : {gp.index[train_id_index]}, test_id : {gp.index[test_id_index]}")
# train_id_list = list(gp.index[train_id_index])
# test_id_list = list(gp.index[test_id_index])
# print(f"fold train :{df.loc[df[self.group_id_col].isin(train_id_list), 'group_target'].value_counts()}")
# print(f"fold valid :{df.loc[df[self.group_id_col].isin(test_id_list), 'group_target'].value_counts()}")
# #print(f"train_seq_id : {df.loc[df[self.group_id_col].isin(train_id_list)].index}, test_id : {df.loc[df[self.group_id_col].isin(test_id_list)].index}")
# fold_idx+=1
# yield list(df.loc[df[self.group_id_col].isin(train_id_list)].index), list(df.loc[df[self.group_id_col].isin(test_id_list)].index)
def testStr():
dict_pd = {
"seq_id":["id0_0", "id0_1", "id0_2", "id0_3", "id1_0", "id1_1", "id1_2", "id1_3", "id2_0", "id2_1", "id2_2", "id2_3", "id3_0", "id3_1", "id3_2", "id3_3"],
"val":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"id":["id0", "id0", "id0", "id0", "id1", "id1", "id1", "id1", "id2", "id2", "id2", "id2", "id3", "id3", "id3", "id3"],
"cat_y":[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,]
}
df = pd.DataFrame(dict_pd)
df = df.set_index("seq_id")
print(df)
    fold = StratifiedKFoldWithGroupID(n_splits=2, group_id_col="id", stratified_target_id="cat_y")
for tr_idx, vl_idx in fold.split(df, df["cat_y"], None):
print(f"tr_idx : {tr_idx}, vl_idx : {vl_idx}")
class Day28KFold(TimeSeriesSplit):
def __init__(self, date_column, clipping=False, test_days=28, split_type=1, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Name of the column holding the time-series (date) information
        self.date_column = date_column
        # Flag to clip the returned index lists to the length of the smallest fold seen so far
self.clipping = clipping
self.test_days = test_days
self.split_type= split_type
def split(self, _X, _y, _group, *args, **kwargs):
        # The input data is assumed to be a DataFrame
        assert isinstance(_X, pd.DataFrame)
        X = _X[self.date_column].reset_index()
        # Initial lengths used when clipping is enabled
        train_fold_min_len, test_fold_min_len = sys.maxsize, sys.maxsize
        # Extract the time-series (date) column
time_duration_list = X[self.date_column].unique()
#print(f"time_duration_list : {time_duration_list}")
ts_df = pd.DataFrame(time_duration_list, columns=[self.date_column])
#print(ts_df)
        # Reset the original index so the values can be used as iloc positions (0, 1, 2, ...)
        #ts_df = ts.reset_index()
        # Sort by the time-series column
sorted_ts_df = ts_df.sort_values(by=self.date_column)
del ts_df
gc.collect()
        # Compute the split indices with the superclass method
fold_idx=1
for train_index, test_index in super().split(sorted_ts_df, *args, **kwargs):
            # Convert the indices into values usable as iloc on the original DataFrame
if self.split_type==1:
train_start_day = sorted_ts_df.iloc[train_index].min()[0]
test_end_day = sorted_ts_df.iloc[test_index].max()[0]
test_start_day = test_end_day - relativedelta(days=(self.test_days - 1))
train_end_day = test_start_day - relativedelta(days=1)
elif self.split_type == 2:
last_day=sorted_ts_df[self.date_column].max()
test_start_day = last_day - relativedelta(days=(self.test_days - 1) * fold_idx)
test_end_day = test_start_day + relativedelta(days=(self.test_days - 1))
train_end_day = test_start_day - relativedelta(days=1)
train_start_day = sorted_ts_df[self.date_column].min()
print(f"last_day :{last_day}")
train_iloc_index = X[X[self.date_column] <= train_end_day].index
test_iloc_index = X[(X[self.date_column] >= test_start_day) & (X[self.date_column] <= test_end_day)].index
print(f"train {train_start_day} to {train_end_day}")
print(f"test {test_start_day} to {test_end_day}")
if self.clipping:
                # Assumes the folds returned by TimeSeriesSplit.split() grow progressively larger
train_fold_min_len = min(train_fold_min_len, len(train_iloc_index))
test_fold_min_len = min(test_fold_min_len, len(test_iloc_index))
print(f"train_fold_min_len : {train_fold_min_len}")
print(f"test_fold_min_len : {test_fold_min_len}")
print("********************")
fold_idx+=1
yield list(train_iloc_index[-train_fold_min_len:]), list(test_iloc_index[-test_fold_min_len:])
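# Illustrative usage sketch for Day28KFold: each fold's validation window is the
# trailing `test_days` days of that fold, and the training window ends the day
# before it starts. Column names and the date range below are placeholders.
def _example_day28_kfold():
    dates = pd.date_range("2016-01-01", periods=120, freq="D")
    df = pd.DataFrame({"date": dates, "sales": np.arange(len(dates))})
    cv = Day28KFold(date_column="date", test_days=28, n_splits=3)
    for train_idx, valid_idx in cv.split(df, df["sales"], None):
        print(len(train_idx), len(valid_idx))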
class MovingWindowKFold(TimeSeriesSplit):
"""時系列情報が含まれるカラムでソートした iloc を返す KFold"""
def __init__(self, ts_column, clipping=False, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Name of the column holding the time-series information
        self.ts_column = ts_column
        # Flag to clip the returned index lists to the length of the smallest fold seen so far
self.clipping = clipping
def split(self, X, *args, **kwargs):
        # The input data is assumed to be a DataFrame
        assert isinstance(X, pd.DataFrame)
        # Initial lengths used when clipping is enabled
        train_fold_min_len, test_fold_min_len = sys.maxsize, sys.maxsize
        # Extract the time-series column
        ts = X[self.ts_column]
        # Reset the original index so the values can be used as iloc positions (0, 1, 2, ...)
        ts_df = ts.reset_index()
        # Sort by the time-series column
sorted_ts_df = ts_df.sort_values(by=self.ts_column)
        # Compute the split indices with the superclass method
for train_index, test_index in super().split(sorted_ts_df, *args, **kwargs):
            # Convert the indices into values usable as iloc on the original DataFrame
train_iloc_index = sorted_ts_df.iloc[train_index].index
test_iloc_index = sorted_ts_df.iloc[test_index].index
if self.clipping:
                # Assumes the folds returned by TimeSeriesSplit.split() grow progressively larger
train_fold_min_len = min(train_fold_min_len, len(train_iloc_index))
test_fold_min_len = min(test_fold_min_len, len(test_iloc_index))
yield list(train_iloc_index[-train_fold_min_len:]), list(test_iloc_index[-test_fold_min_len:])
def main():
df = sns.load_dataset('flights')
month_name_mappings = {name: str(n).zfill(2) for n, name in
enumerate(month_name)}
df['month'] = df['month'].apply(lambda x: month_name_mappings[x])
df['year-month'] = df.year.astype(str) + '-' + df.month.astype(str)
    df['year-month'] = pd.to_datetime(df['year-month'], format='%Y-%m')
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
Timestamp(dt64)
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int( | Timestamp(x) | pandas.Timestamp |
import pandas as pd
import numpy as np
import nltk
import json
import os
import logging
import logzero
from logzero import logger
import itertools
from scipy.sparse import save_npz
try:
from .helpers import unicode_remover, character_remover, character_transformer, contraction_transformer, lemmatize
except:
from helpers import unicode_remover, character_remover, character_transformer, contraction_transformer, lemmatize
from datetime import datetime
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from PIL import Image
class Cleaner():
def __init__(self, stop_words_filename='custom_stop_words.txt', assets_directory='./cleaner/assets/', debug=1, early_stop=None):
self.init_stop_words(os.path.join(assets_directory, stop_words_filename))
self.contraction_filename = os.path.join(assets_directory, 'contractions.json')
self.early_stop = early_stop
self.tag_dict = {
"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV
}
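# Illustrative mapping (hypothetical token): nltk.pos_tag(['running'])[0][1] returns a
# Penn Treebank tag such as 'VBG'; its first letter 'V' maps to wordnet.VERB here,
# which can then be handed to the WordNet lemmatizer.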
logzero.loglevel(debug)
def init_stop_words(self, stop_words_filename):
""" Sets custom stop words list """
delete_from_stop_words = ['more', 'most', 'very', 'no', 'nor', 'not']
self.stop_words = nltk.corpus.stopwords.words("english")
self.stop_words = list(set(self.stop_words) - set(delete_from_stop_words))
with open(stop_words_filename) as stop_words_file:
lines = [line.rstrip() for line in stop_words_file]
self.stop_words += lines
def set_file(self, filepath, index_col='review_id', content_col='comment', filetype='csv'):
"""
Sets a new file to be cleaned:
- self.tokenized_corpus: dict{int: review_id, list[str]: tokenized review (cleaned + split)}
- self.tokenized_corpus_ngram: dict{int: review_id, list[tuple(n * str)]: tokenized review (cleaned + split)}
- self.tokenized_corpus_sentences = dict{int: restaurant_id, str: tokenized review sentence}
- self.word_count = dict{int: review_id, dict{str: word, int: count}}
- self.word_count_by_restaurant = dict{int: restaurant_id, dict{str: word, int: count}}
- self.df_word_frequency = dict{int: restaurant_id, df(index = review_id, columns = set of vocab per restaurant)}
Raises:
TypeError: if filename is not of type str
"""
if isinstance(filepath, str):
self.filename = filepath.split('/')[-1]
print("filepath in cleaner set file:", filepath)
if filetype == 'csv':
self.df = pd.read_csv(filepath, sep='#', index_col=[index_col])
elif filetype == 'json':
json = | pd.read_json(filepath, lines=True) | pandas.read_json |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractproperty
from lru import LRU
import warnings
from pandas.tseries.holiday import AbstractHolidayCalendar
from six import with_metaclass
from numpy import searchsorted
import numpy as np
import pandas as pd
from pandas import (
DataFrame,
date_range,
DatetimeIndex,
)
from pandas.tseries.offsets import CustomBusinessDay
from zipline.utils.calendars._calendar_helpers import (
next_divider_idx,
previous_divider_idx,
is_open,
minutes_to_session_labels,
)
from zipline.utils.input_validation import (
attrgetter,
coerce,
preprocess,
)
from zipline.utils.memoize import lazyval
start_default = pd.Timestamp('1990-01-01', tz='UTC')
end_base = | pd.Timestamp('today', tz='UTC') | pandas.Timestamp |
# Importing essential libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import floor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
import pickle
# Loading the dataset
df = pd.read_csv('Bengluru_House_Data.csv')
# Removing the 'society' column
df = df.drop('society', axis='columns')
''' Data Cleaning Process '''
# Imputing missing values in the 'balcony' and 'bath' columns with their medians
balcony_median = float(floor(df.balcony.median()))
bath_median = float(floor(df.bath.median()))
df.balcony = df.balcony.fillna(balcony_median)
df.bath = df.bath.fillna(bath_median)
# Dropping the remaining rows with null values, since they are few compared to the size of the dataset.
df = df.dropna()
# Converting the size column to bhk
df['bhk'] = df['size'].apply(lambda x: int(x.split(' ')[0]))
df = df.drop('size', axis='columns')
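# Illustrative values: a 'size' entry such as '2 BHK' or '4 Bedroom' becomes bhk = 2 or 4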
# Since the total_sqft column contains range values such as 1133-1384, let's filter out these values
def isFloat(x):
try:
float(x)
except:
return False
return True
# Displaying all the rows whose total_sqft cannot be parsed as a number
df[~df['total_sqft'].apply(isFloat)]
# Converting the range values to their midpoint and discarding values that cannot be parsed
def convert_sqft_to_num(x):
tokens = x.split('-')
if len(tokens) == 2:
return (float(tokens[0])+float(tokens[1]))/2
try:
return float(x)
except:
return None
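# Worked examples (hypothetical inputs):
#   convert_sqft_to_num('2100') -> 2100.0
#   convert_sqft_to_num('1133 - 1384') -> 1258.5 (midpoint of the range)
#   convert_sqft_to_num('34.46Sq. Meter') -> None (cannot be parsed)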
df['new_total_sqft'] = df.total_sqft.apply(convert_sqft_to_num)
df = df.drop('total_sqft', axis='columns')
# Removing the rows in the new_total_sqft column that have None values
df = df.dropna()
''' Feature Engineering '''
# Adding a new column of price_per_sqft
df1 = df.copy()
# In our dataset the price column is in Lakhs
df1['price_per_sqft'] = (df1['price']*100000)/df1['new_total_sqft']
# Checking unique values of 'location' column
locations = list(df['location'].unique())
# Removing the extra spaces at the end
df1.location = df1.location.apply(lambda x: x.strip())
# Counting all the unique values in the 'location' column
location_stats = df1.groupby('location')['location'].agg('count').sort_values(ascending=False)
# Labelling the locations with less than or equal to 10 occurrences as 'other'
locations_less_than_10 = location_stats[location_stats<=10]
df1.location = df1.location.apply(lambda x: 'other' if x in locations_less_than_10 else x)
# Labelling availability values occurring fewer than 10000 times as 'Not Ready'
dates = df1.groupby('availability')['availability'].agg('count').sort_values(ascending=False)
dates_not_ready = dates[dates<10000]
df1.availability = df1.availability.apply(lambda x: 'Not Ready' if x in dates_not_ready else x)
''' Removing Outliers '''
df2 = df1[~(df1.new_total_sqft/df1.bhk<300)]
# Since the 'price_per_sqft' column has a wide range (min = Rs. 267/sqft, max = Rs. 127470/sqft), we remove the extreme ends using the standard deviation
def remove_pps_outliers(df):
df_out = | pd.DataFrame() | pandas.DataFrame |
from collections import Counter
from itertools import product
import os
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
import sys
sys.path.insert(0, '../myfunctions')
from prediction import fit_prediction_model
#from causal_inference import infer_mediation_3mediator
from causal_inference import infer_mediation, select_estimator
if __name__=='__main__':
## load data
res = pd.read_excel('../data/hiv-brain-age.xlsx')
A = res['HIV'].values.astype(float)
res.loc[res.Sex=='M', 'Sex'] = 1
res.loc[res.Sex=='F', 'Sex'] = 0
race = OneHotEncoder(sparse=False).fit_transform(res.Race.values.astype(str).reshape(-1,1))
L = np.c_[res[['Age', 'Sex', 'Tobacco use disorder', 'Alcoholism']].values.astype(float), race]
Y = res['BAI'].values.astype(float)
Mnames = ['obesity', 'heart disorder', 'sleep disorder']
M = res[Mnames].values.astype(float)
for mm in Mnames:
print(mm, Counter(res[mm]))
Mnames.append('avg')
n_mediator = len(Mnames)
sids = np.arange(len(A))
## set up numbers
random_state = 2020
prediction_methods = ['linear', 'rf']#, 'xgb']
ci_method = 'dr'
Nbt = 1000
np.random.seed(random_state)
## generate cv split
"""
cv_path = 'patients_cv_split_real_data2.pickle'
if os.path.exists(cv_path):
with open(cv_path, 'rb') as ff:
tr_sids, te_sids = pickle.load(ff)
else:
cvf = 5
sids2 = np.array(sids)
np.random.shuffle(sids2)
tr_sids = []
te_sids = []
cv_split = np.array_split(sids2, cvf)
for cvi in cv_split:
te_sids.append(np.sort(cvi))
tr_sids.append(np.sort(np.setdiff1d(sids2, cvi)))
with open(cv_path, 'wb') as ff:
pickle.dump([tr_sids, te_sids], ff)
"""
tr_sids = [sids]
te_sids = [sids]
## bootstrapping
#"""
res = []
for bti in tqdm(range(Nbt+1)):
np.random.seed(random_state+bti)
if bti==0:
# use the actual data
Abt = A
Ybt = Y
Lbt = L
Mbt = M
sidsbt = sids
prediction_methods_outcome = prediction_methods
prediction_methods_exposure = prediction_methods
else:
# use bootstrapped data
btids = np.random.choice(len(Y), len(Y), replace=True)
Abt = A[btids]
Ybt = Y[btids]
Lbt = L[btids]
Mbt = M[btids]
sidsbt = sids[btids]
prediction_methods_outcome = [best_pm_o]
prediction_methods_exposure = [best_pm_e]
# outer loop cross validation
for cvi in range(len(tr_sids)):
trid = np.in1d(sidsbt, tr_sids[cvi])
teid = np.in1d(sidsbt, te_sids[cvi])
Atr = Abt[trid]
Ytr = Ybt[trid]
Ltr = Lbt[trid]
Mtr = Mbt[trid]
Ate = Abt[teid]
Yte = Ybt[teid]
Lte = Lbt[teid]
Mte = Mbt[teid]
Lmean = Ltr.mean(axis=0)
Lstd = Ltr.std(axis=0)
Ltr = (Ltr-Lmean)/Lstd
Lte = (Lte-Lmean)/Lstd
#try:
for pi, pm in enumerate(product(prediction_methods_outcome, prediction_methods_exposure)):
if bti==0:
print(pm)
pm_outcome, pm_exposure = pm
# fit A|L
model_a_l, model_a_l_perf = fit_prediction_model(pm_exposure+':bclf', Ltr, Atr,
random_state=random_state+pi+1000)
# fit Y|A,L,M
model_y_alm, model_y_alm_perf = fit_prediction_model(pm_outcome+':reg', np.c_[Atr, Ltr, Mtr], Ytr,
random_state=random_state+pi*3000)
model_m_als = []
model_m_al_perfs = []
for mi, mediator_name in enumerate(Mnames[:-1]):
"""
if mi==0:
# fit M1|A,L
model_m_al, model_m_al_perf = fit_prediction_model(pm+':bclf', np.c_[Atr, Ltr], Mtr[:, mi],
save_path='models_real_data2/med_model_%s_cv%d_%s'%(mediator_name, cvi+1, pm) if bti==0 else None,
random_state=random_state+pi*2000+mi)
elif mi==1:
# fit M2|A,L,M1
model_m_al, model_m_al_perf = fit_prediction_model(pm+':bclf', np.c_[Atr, Ltr, Mtr[:,0]], Mtr[:, mi],
save_path='models_real_data2/med_model_%s_cv%d_%s'%(mediator_name, cvi+1, pm) if bti==0 else None,
random_state=random_state+pi*2000+mi)
elif mi==2:
# fit M3|A,L,M1,M2
model_m_al, model_m_al_perf = fit_prediction_model(pm+':bclf', np.c_[Atr, Ltr, Mtr[:,[0,1]]], Mtr[:, mi],
save_path='models_real_data2/med_model_%s_cv%d_%s'%(mediator_name, cvi+1, pm) if bti==0 else None,
random_state=random_state+pi*2000+mi)
"""
# fit Mi|A,L
model_m_al, model_m_al_perf = fit_prediction_model(pm_exposure+':bclf', np.c_[Atr, Ltr], Mtr[:, mi],
random_state=random_state+pi*2000+mi)
model_m_als.append(model_m_al)
model_m_al_perfs.append(model_m_al_perf)
# do causal inference
#cdes, scies, cies0, cies1 = infer_mediation_3mediator(cim, model_a_l, model_m_als, model_y_alms, Yte, Mte, Ate, Lte, random_state=random_state+pi*4000+ci)
cdes, scies, cies0, cies1 = infer_mediation(ci_method, model_a_l, model_m_als, model_y_alm,
Yte, Mte, Ate, Lte, random_state=random_state+pi*4000)
# add average performance
cdes.append(np.mean(cdes))
scies.append(np.mean(scies))
cies0.append(np.mean(cies0))
cies1.append(np.mean(cies1))
model_m_al_perfs.append(np.nan)
res.append([bti, cvi, pm_outcome, pm_exposure, ci_method, model_a_l_perf, model_y_alm_perf] + model_m_al_perfs + cdes + scies + cies0 + cies1)
#print(res[-1])
#except Exception as ee:
# print(str(ee))
if bti==0:
_,_,best_pm_o, best_pm_e = select_estimator(
np.array([x[1] for x in res if x[0]==bti]),
[(x[2],x[3]) for x in res if x[0]==bti],
np.array([x[-n_mediator*4+n_mediator-1]+x[-n_mediator*3+n_mediator-1] for x in res if x[0]==bti]))
print('best prediction model: outcome: %s; exposure: %s'%(best_pm_o, best_pm_e))
res = [x for x in res if x[2]==best_pm_o and x[3]==best_pm_e]
with open('results_real_data2.pickle', 'wb') as ff:
pickle.dump([res, best_pm_o, best_pm_e], ff, protocol=2)
#"""
#with open('results_real_data2.pickle', 'rb') as ff:
# res, best_pm_o, best_pm_e = pickle.load(ff)
res = np.array(res, dtype=object)
Nbt = res[:,0].max()
perf_A_L_cols = ['perf(A|L)']
perf_Y_ALM_cols = ['perf(Y|A,L,M)']
perf_M_AL_cols = ['perf(M|A,L) %s'%x for x in Mnames]
CDE_cols = ['CDE %s'%x for x in Mnames]
sCIE_cols = ['sCIE %s'%x for x in Mnames]
CIE0_cols = ['CIE0 %s'%x for x in Mnames]
CIE1_cols = ['CIE1 %s'%x for x in Mnames]
cols = perf_A_L_cols + perf_Y_ALM_cols + perf_M_AL_cols + CDE_cols + sCIE_cols + CIE0_cols + CIE1_cols
columns = ['bt', 'fold', 'outcome_prediction_model', 'exposure_prediction_model', 'causal_inference_model'] + cols
res = | pd.DataFrame(data=res, columns=columns) | pandas.DataFrame |
"""Copyright (c) Facebook, Inc. and its affiliates."""
# pylint: disable=unused-argument,too-many-statements,unused-variable
import functools
import glob
import os
from collections import defaultdict
from pathlib import Path
from typing import List, Optional, Union
import altair as alt
import altair_saver
import numpy as np
import pandas as pd
import typer
from altair.expr import datum
from functional import pseq, seq
from pedroai.io import (
read_json,
read_jsonlines,
requires_file,
requires_files,
safe_file,
)
from pedroai.math import to_precision
from rich.console import Console
from leaderboard.config import conf
from leaderboard.data import (
IrtParsed,
LeaderboardPredictions,
load_squad_submissions,
load_squad_v2,
)
alt.data_transformers.disable_max_rows()
PAPERS_ROOT = Path(os.environ.get("PAPERS_ROOT", "./"))
AUTO_FIG = PAPERS_ROOT / "auto_fig"
COMMIT_AUTO_FIGS = PAPERS_ROOT / "commit_auto_figs"
BASE_SIZE = 150
plot_app = typer.Typer()
console = Console()
def save_chart(chart: alt.Chart, base_path: Union[str, Path], filetypes: List[str]):
if isinstance(base_path, Path):
base_path = str(base_path)
for t in filetypes:
path = base_path + "." + t
if t in ("svg", "pdf"):
method = "node"
else:
method = None
console.log(f"Saving to: {path}")
altair_saver.save(chart, safe_file(path), method=method)
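# Hypothetical usage (file name is illustrative):
# save_chart(chart, AUTO_FIG / "my_plot", ["pdf", "svg"])
# writes my_plot.pdf and my_plot.svg under the auto_fig directory.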
def generate_ablation_files():
ablation_files = {}
for path in glob.glob("data/linear/**/**/**/report.json"):
fields = path.split("/")
irt_family = fields[2]
irt_type = fields[3]
features = fields[4]
if irt_type in ("1PL", "2PL"):
continue
ablation_files[(irt_family, irt_type, features)] = Path(path)
return ablation_files
PLOTS = {}
def register_plot(name: str):
def decorator(func):
PLOTS[name] = func
return func
return decorator
ABLATION_FILES = generate_ablation_files()
def generate_irt_files():
irt_files = {}
for model_type, evaluations in conf["irt"]["squad"]["dev"]["pyro"].items():
for eval_type in ("full", "heldout"):
irt_files[(model_type, eval_type)] = Path(evaluations[eval_type]) / "report.json"
return irt_files
IRT_FILES = generate_irt_files()
def init_score():
return {"tie": 0, "win": 0, "loss": 0}
def run_stats_tournament(fold: str):
test_results = {}
for test in ["mcnemar", "see", "sem", "student_t", "wilcoxon"]:
stats = read_json(f"data/stats/fold={fold}/sampling=random/percent=100/{test}.json")
match_results = defaultdict(init_score)
alpha = 0.01
for r in stats["results"]:
model_a = r["model_a"]
model_b = r["model_b"]
if r["pvalue"] is not None and r["pvalue"] < alpha:
if r["score_a"] > r["score_b"]:
match_results[model_a]["win"] += 1
match_results[model_b]["loss"] += 1
else:
match_results[model_a]["loss"] += 1
match_results[model_b]["win"] += 1
else:
match_results[model_a]["tie"] += 1
match_results[model_b]["tie"] += 1
test_results[test] = match_results
return test_results
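# Illustrative return shape (hypothetical subject id and counts):
# run_stats_tournament("dev")["mcnemar"]["subject_42"] -> {"tie": 3, "win": 10, "loss": 2}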
@register_plot("rank_correlation_table")
@requires_file(conf["squad"]["dev_to_test"])
def rank_correlation_table(filetypes: List[str], commit: bool = False, include_test: bool = True):
irt_model = "3PL"
dev_irt_params = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["dev"]["pyro"][irt_model]["full"]) / "parameters.json"
)
dev_predictions = LeaderboardPredictions.parse_file(
conf["squad"]["submission_predictions"]["dev"]
)
dev_id_to_subject = load_squad_submissions(dev_predictions)
console.log("N Dev IRT", len(dev_irt_params.model_stats))
stats_results = run_stats_tournament("dev")
mcnemar_results = stats_results["mcnemar"]
see_results = stats_results["see"]
student_t_results = stats_results["student_t"]
sem_results = stats_results["sem"]
if include_test:
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
test_irt_params = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"][irt_model]["full"]) / "parameters.json"
)
console.log("N Test IRT", len(test_irt_params.model_stats))
test_stats_results = run_stats_tournament("test")
test_mcnemar_results = test_stats_results["mcnemar"]
test_see_results = test_stats_results["see"]
test_student_t_results = test_stats_results["student_t"]
test_sem_results = test_stats_results["sem"]
else:
mapping = None
dev_to_test = None
test_irt_params = None
test_stats_results = None
test_mcnemar_results = None
test_see_results = None
test_student_t_results = None
test_sem_results = None
rows = []
n_test = 0
n_dev = 0
for subject_id in dev_id_to_subject.keys():
subject = dev_id_to_subject[subject_id]
entry = {
"subject_id": subject_id,
"name": subject["name"],
"dev_em": subject["dev_em"],
"test_em": subject["test_em"],
"dev_skill": dev_irt_params.model_stats[subject_id].skill,
# "dev_mcnemar": mcnemar_results[subject_id]["win"],
# "dev_see": see_results[subject_id]["win"],
# "dev_student_t": student_t_results[subject_id]["win"],
# "dev_sem": sem_results[subject_id]["win"],
}
n_dev += 1
if include_test:
if subject_id in dev_to_test:
test_subject_id = dev_to_test[subject_id]
if test_subject_id in test_irt_params.model_stats:
entry["test_skill"] = test_irt_params.model_stats[test_subject_id].skill
# entry["test_mcnemar"] = test_mcnemar_results[test_subject_id]["win"]
# entry["test_see"] = test_see_results[test_subject_id]["win"]
# entry["test_student_t"] = test_student_t_results[test_subject_id][
# "win"
# ]
# entry["test_sem"] = test_sem_results[test_subject_id]["win"]
n_test += 1
rows.append(entry)
console.log("N Dev", n_dev, "N Test", n_test)
df = pd.DataFrame(rows).dropna(axis=0)
console.log(df)
name_mapping = {
"dev_em": r"EM$_{\text{dev}}$",
"test_em": r"EM$_{\text{test}}$",
"dev_skill": r"Ability$_{\text{dev}}$",
"test_skill": r"Ability$_{\text{test}}$",
}
correlations = df.corr(method="kendall")
correlations.to_pickle("/tmp/leaderboard_correlations.pickle")
console.log(correlations)
print(
correlations.applymap(lambda n: f"${to_precision(n, 3)}$")
.rename(columns=name_mapping, index=name_mapping)
.to_latex(column_format="l" + len(name_mapping) * "r", escape=False)
)
@register_plot("sampling_stability")
def sample_stability_plot(filetypes: List[str], commit: bool = False):
input_dir = Path(conf["stability"]["sampling"])
random_df = pd.read_json(input_dir / "random_df.json")
irt_df = pd.read_json(input_dir / "irt_df.json")
info_df = pd.read_json(input_dir / "info_df.json")
method_names = {
"dev_high_disc_to_test": "High Discrimination",
"dev_high_diff_to_test": "High Difficulty",
"dev_high_disc_diff_to_test": "High Disc + Diff",
"dev_info_to_test": "High Information",
"dev_random_to_test": "Random",
}
def format_df(dataframe):
return dataframe.assign(
sampling_method=dataframe["variable"].map(lambda v: method_names[v])
)
x_scale = alt.X("trial_size", title="Development Set Sample Size", scale=alt.Scale(type="log"))
y_scale = alt.Scale(zero=False)
color_scale = alt.Color(
"sampling_method",
title="Sampling Method",
legend=alt.Legend(orient="bottom-right", fillColor="white", padding=5, strokeColor="gray"),
sort=[
"High Disc + Diff",
"High Information",
"High Discrimination",
"High Difficulty",
"Random",
],
)
random_line = (
alt.Chart(format_df(random_df))
.mark_line()
.encode(
x=x_scale,
y=alt.Y("mean(value)", scale=y_scale, title="Correlation to Test Rank"),
color=color_scale,
)
)
random_band = (
alt.Chart(format_df(random_df))
.mark_errorband(extent="ci")
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
determ_df = pd.concat([irt_df, info_df])
irt_line = (
alt.Chart(format_df(determ_df))
.mark_line()
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
font_size = 18
chart = (
(random_band + random_line + irt_line)
.configure_axis(labelFontSize=font_size, titleFontSize=font_size)
.configure_legend(
labelFontSize=font_size, titleFontSize=font_size, symbolLimit=0, labelLimit=0,
)
.configure_header(labelFontSize=font_size)
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "sampling_rank", filetypes)
else:
save_chart(chart, AUTO_FIG / "sampling_rank", filetypes)
@register_plot("cat_sampling_stability")
def cat_sample_stability_plot(filetypes: List[str], commit: bool = False):
input_dir = Path(conf["stability"]["cat_sampling"])
random_df = pd.read_json(input_dir / "random_df.json")
irt_df = pd.read_json(input_dir / "irt_df.json")
info_df = pd.read_json(input_dir / "info_df.json")
method_names = {
"dev_high_disc_to_test": "High Discrimination",
"dev_high_diff_to_test": "High Difficulty",
"dev_high_disc_diff_to_test": "High Disc + Diff",
"dev_info_to_test": "High Information",
"dev_random_to_test": "Random",
}
def format_df(dataframe):
return dataframe.assign(
sampling_method=dataframe["variable"].map(lambda v: method_names[v])
)
x_scale = alt.X("trial_size", title="Development Set Sample Size", scale=alt.Scale(type="log"))
y_scale = alt.Scale(zero=False)
color_scale = alt.Color(
"sampling_method",
title="Sampling Method",
legend=alt.Legend(orient="bottom-right", fillColor="white", padding=5, strokeColor="gray"),
sort=[
"High Information",
"High Discrimination",
"High Disc + Diff",
"High Difficulty",
"Random",
],
)
random_line = (
alt.Chart(format_df(random_df))
.mark_line()
.encode(
x=x_scale,
y=alt.Y("mean(value)", scale=y_scale, title="Correlation to Test Rank"),
color=color_scale,
)
)
random_band = (
alt.Chart(format_df(random_df))
.mark_errorband(extent="ci")
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
determ_df = pd.concat([irt_df, info_df])
irt_line = (
alt.Chart(format_df(determ_df))
.mark_line()
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
font_size = 18
chart = (
(random_band + random_line + irt_line)
.configure_axis(labelFontSize=font_size, titleFontSize=font_size)
.configure_legend(
labelFontSize=font_size, titleFontSize=font_size, symbolLimit=0, labelLimit=0,
)
.configure_header(labelFontSize=font_size)
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "cat_sampling_rank", filetypes)
else:
save_chart(chart, AUTO_FIG / "cat_sampling_rank", filetypes)
def label_experiment(label):
if label.startswith("test_"):
return "Dev Sample to Test"
else:
return "Dev Sample to Dev Sample"
def label_sig(fold: str):
if fold == "dev":
return "Dev Sample to Dev Sample"
elif fold == "test":
return "Dev Sample to Test"
else:
raise ValueError(f"Invalid fold: {fold}")
@functools.lru_cache()
def load_test_irt():
test_irt_parsed = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"]["3PL"]["full"]) / "parameters.json"
)
test_preds = LeaderboardPredictions.parse_file(conf["squad"]["submission_predictions"]["test"])
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
def get_test_irt(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_irt_parsed.model_stats:
return test_irt_parsed.model_stats[test_id].skill
else:
return None
else:
return None
def get_test_classical(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_preds.model_scores:
return test_preds.model_scores[test_id]["exact_match"]
else:
return None
else:
return None
return get_test_irt, get_test_classical
def rank_compute_bootstrap_ci(data_path: str, n_trials: int = 1000, fold: str = "dev"):
"""Given stability experiment, compute bootstrapped
confidence intervals, and check if correlations are above 95%
interval.
Args:
data_path (str): Path to dataframe stored in feather format with experiment
"""
df = pd.read_feather(data_path)
size = df["size"].iloc[0]
trial_id = df["trial_id"].iloc[0]
if fold == "test":
get_test_irt, get_test_classical = load_test_irt()
df["b_irt"] = df["subject_id"].map(get_test_irt)
df["b_classical"] = df["subject_id"].map(get_test_classical)
df = df.dropna(0)
real_corr = df.corr(method="kendall")
# Due to not implementing identifiability, IRT scores may be flipped
# Detect that and adjust as necessary
if real_corr["a_irt"].a_classical < 0:
df["a_irt"] = -df["a_irt"]
if real_corr["b_irt"].b_classical < 0:
df["b_irt"] = -df["b_irt"]
real_corr = df.corr(method="kendall")
corr_diff = real_corr["a_irt"].b_irt - real_corr["a_classical"].b_classical
a_classical_scores = df.a_classical.to_numpy()
a_irt_scores = df.a_irt.to_numpy()
indices = np.arange(0, len(a_classical_scores))
# Build up a distribution of score differences
diff_dist = []
# Simulate a bunch of times
n_subjects = len(a_classical_scores)
for _ in range(n_trials):
# Create a new similar DF, except sample with replacement one set of rankings
# Be sure to keep pairs of irt/classical scores together
sample_indices = np.random.choice(indices, n_subjects, replace=True)
sample_classical = a_classical_scores[sample_indices]
sample_irt = a_irt_scores[sample_indices]
sample_df = pd.DataFrame(
{
"subject_id": df["subject_id"],
# I'm not sure doing replacement is correct
# Also not sure if n=161 is correct, seems odd,
# but I'd be worried if I did only 20 that
# the distribution of differences might be different
"a_classical": sample_classical,
"a_irt": sample_irt,
# Keep one ranking the same
"b_classical": df["b_classical"],
"b_irt": df["b_irt"],
}
)
sample_corr = sample_df.corr(method="kendall")
# Grab correlations
irt_corr = sample_corr.loc["a_irt"].b_irt
classical_corr = sample_corr.loc["a_classical"].b_classical
# Record the difference
diff_dist.append(irt_corr - classical_corr)
diff_df = pd.DataFrame({"diff": diff_dist})
# Two tailed test, so divide by two
alpha = 1 - 0.95
lower, upper = diff_df["diff"].quantile([alpha, 1 - alpha])
# significant = bool(corr_diff < lower or upper < corr_diff)
significant = bool(upper < corr_diff)
p_value = 1 - ((diff_df["diff"] < corr_diff).sum() / n_trials)
return {
"significant": significant,
"p_value": float(p_value),
"diff": float(corr_diff),
"irt_corr": float(real_corr["a_irt"].b_irt),
"classical_corr": float(real_corr["a_classical"].b_classical),
"trial_size": int(size),
"trial_id": int(trial_id),
"lower": float(lower),
"upper": float(upper),
"alpha": alpha,
"diff_dist": diff_dist,
}
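# Hypothetical usage (the path is illustrative):
# ci = rank_compute_bootstrap_ci("data/stability/ranking/size=100_trial=0.feather", fold="dev")
# ci["significant"], ci["p_value"], ci["diff"]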
def process_trial_group(trial_size, trials):
diff_dist = []
for t in trials:
diff_dist.extend(t["diff_dist"])
diff_dist = np.array(diff_dist)
for t in trials:
p_value = 1 - (diff_dist < t["diff"]).mean()
t["total_p_value"] = p_value
yield t
def get_cached_rank_stability_sig(force: bool = False, n_trials: bool = 1000):
input_dir = Path(conf["stability"]["ranking"])
output_path = Path(conf["stability"]["ranking_sig"])
if output_path.exists() and not force:
console.log("Cached ranking stability found")
return pd.read_feather(output_path)
console.log("Cached ranking stability not found, computing...")
console.log("Computing dev results")
dev_results = (
pseq(input_dir.glob("*.feather"))
.map(lambda x: rank_compute_bootstrap_ci(x, n_trials=n_trials, fold="dev"))
.list()
)
console.log("Computing test results")
test_results = (
pseq(input_dir.glob("*.feather"))
.map(lambda x: rank_compute_bootstrap_ci(x, n_trials=n_trials, fold="test"))
.list()
)
dev_processed = (
seq(dev_results)
.group_by(lambda x: x["trial_size"])
.smap(process_trial_group)
.flatten()
.list()
)
test_processed = (
seq(test_results)
.group_by(lambda x: x["trial_size"])
.smap(process_trial_group)
.flatten()
.list()
)
dev_df = pd.DataFrame(dev_processed).drop("diff_dist", axis=1)
dev_df["fold"] = "dev"
test_df = pd.DataFrame(test_processed).drop("diff_dist", axis=1)
test_df["fold"] = "test"
df = pd.concat([dev_df, test_df]).reset_index()
df["experiment"] = df["fold"].map(label_sig)
df.to_feather(output_path)
return df
@register_plot("rank_sig")
def rank_stability_sig(filetypes: List[str], commit: bool = False, n_trials: int = 1000):
df = get_cached_rank_stability_sig(force=False, n_trials=n_trials)
font_size = 14
chart = (
# The plot gets too crowded if we include below 100, where
# we have a higher density of experiments than 1 per 100 sizes
alt.Chart(df[df["trial_size"] > 99])
.mark_boxplot(size=5)
.encode(
x=alt.X("trial_size", title="Sample Size"),
y=alt.Y("total_p_value", title="P-Value", axis=alt.Axis(tickCount=11),),
)
.properties(width=400, height=150)
.facet(alt.Column("experiment", title=""))
.configure_header(titleFontSize=font_size, labelFontSize=font_size)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "ranking_stability_significance", filetypes)
else:
save_chart(chart, AUTO_FIG / "ranking_stability_significance", filetypes)
@register_plot("stability")
def rank_stability_plot(filetypes: List[str], commit: bool = False):
test_irt_parsed = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"]["3PL"]["full"]) / "parameters.json"
)
test_preds = LeaderboardPredictions.parse_file(conf["squad"]["submission_predictions"]["test"])
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
df = create_rank_stability_df(
dev_to_test=dev_to_test, test_preds=test_preds, test_irt_parsed=test_irt_parsed
)
names = {
"abs_irt_corr": "IRT to IRT",
"classical_corr": "Acc to Acc",
"test_classical_sample_classical_corr": "Acc to Acc",
"test_classical_sample_irt_corr": "IRT to Acc",
"test_irt_sample_classical_corr": "Acc to IRT",
"test_irt_sample_irt_corr": "IRT to IRT",
}
color_order = ["IRT to IRT", "Acc to Acc", "IRT to Acc", "Acc to IRT"]
melt_df = df.drop(columns=["irt_corr"]).melt(id_vars=["trial_size", "trial_id"]).dropna(axis=0)
excluded = ["IRT to Acc", "Acc to IRT"]
console.log(melt_df.head())
melt_df["correlation"] = melt_df["variable"].map(lambda v: names[v])
melt_df = melt_df[melt_df["correlation"].map(lambda v: v not in excluded)]
melt_df["experiment"] = melt_df["variable"].map(label_experiment)
base = alt.Chart(melt_df).encode(
x=alt.X(
"trial_size",
title="Development Set Sample Size",
scale=alt.Scale(type="log", base=2, domain=[16, 6000]),
),
color=alt.Color(
"correlation",
title="Correlation",
scale=alt.Scale(scheme="category10"),
sort=color_order,
legend=alt.Legend(
symbolOpacity=1,
symbolType="circle",
symbolStrokeWidth=3,
orient="none",
legendX=570,
legendY=105,
fillColor="white",
strokeColor="gray",
padding=5,
),
),
)
y_title = "Kendall Rank Correlation"
line = base.mark_line(opacity=0.7).encode(
y=alt.Y("mean(value):Q", scale=alt.Scale(zero=False), title=y_title),
)
band = base.mark_errorband(extent="ci").encode(
y=alt.Y("value", title=y_title, scale=alt.Scale(zero=False)),
color=alt.Color("correlation", sort=color_order),
)
font_size = 14
chart = (
(band + line)
.properties(width=300, height=170)
.facet(alt.Column("experiment", title=""))
.configure_header(titleFontSize=font_size, labelFontSize=font_size)
.resolve_axis(y="independent")
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "stability_simulation_corr", filetypes)
else:
save_chart(chart, AUTO_FIG / "stability_simulation_corr", filetypes)
def create_rank_stability_df(*, test_irt_parsed, dev_to_test, test_preds):
def get_test_irt(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_irt_parsed.model_stats:
return test_irt_parsed.model_stats[test_id].skill
else:
return None
else:
return None
def get_test_classical(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_preds.model_scores:
return test_preds.model_scores[test_id]["exact_match"]
else:
return None
else:
return None
rows = []
trials = {}
input_dir = Path(conf["stability"]["ranking"])
for path in input_dir.glob("*.feather"):
exp_df = pd.read_feather(path)
exp_df["abs_a_irt"] = exp_df["a_irt"].abs()
exp_df["abs_b_irt"] = exp_df["b_irt"].abs()
exp_df["test_classical"] = exp_df["subject_id"].map(get_test_classical)
exp_df["test_irt"] = exp_df["subject_id"].map(get_test_irt)
# Drop the rows missing test data
exp_df = exp_df.dropna(0)
size = exp_df.iloc[0]["size"]
trial_id = exp_df.iloc[0].trial_id
trials[(size, trial_id)] = exp_df
corr = exp_df.corr(method="kendall")
rows.append(
{
"trial_size": size,
"trial_id": trial_id,
"irt_corr": corr.loc["a_irt"].b_irt,
"classical_corr": corr.loc["a_classical"].b_classical,
"test_irt_sample_irt_corr": abs(corr.loc["test_irt"].a_irt),
"test_irt_sample_classical_corr": abs(corr.loc["test_irt"].a_classical),
"test_classical_sample_irt_corr": abs(corr.loc["test_classical"].a_irt),
"test_classical_sample_classical_corr": abs(corr.loc["test_classical"].a_classical),
}
)
rows.append(
{
"trial_size": size,
"trial_id": trial_id,
"irt_corr": None,
"classical_corr": None,
"test_irt_sample_irt_corr": abs(corr.loc["test_irt"].b_irt),
"test_irt_sample_classical_corr": abs(corr.loc["test_irt"].b_classical),
"test_classical_sample_irt_corr": abs(corr.loc["test_classical"].b_irt),
"test_classical_sample_classical_corr": abs(corr.loc["test_classical"].b_classical),
}
)
df = pd.DataFrame(rows)
df["abs_irt_corr"] = df["irt_corr"].abs()
return df
# unregistering b/c the data doesn't seem to exist, and
# the commit flag wasn't there -- assuming it's an old plot?
# @register_plot("irt_correlation")
def irt_correlation_table(
filetypes: List[str],
include_multidim: bool = True,
method: str = "kendall",
commit: bool = False,
):
irt_3pl = IrtParsed.from_irt_file("data/irt/squad/dev/pyro/3PL_full/parameters.json")
irt_2pl = IrtParsed.from_irt_file("data/irt/squad/dev/pyro/2PL_full/parameters.json")
irt_1pl = IrtParsed.from_irt_file("data/irt/squad/dev/pyro/1PL_full/parameters.json")
models = [irt_3pl, irt_2pl, irt_1pl]
if include_multidim:
multidim = IrtParsed.from_multidim_1d_file(
"data/multidim_irt/squad/dev/dim=1/models.jsonlines",
"data/multidim_irt/squad/dev/dim=1/items.jsonlines",
)
models.append(multidim)
subject_rows = []
for sid in irt_3pl.model_ids:
entry = {"subject_id": sid}
for irt_model in models:
entry[irt_model.irt_model.value] = irt_model.model_stats[sid].skill
subject_rows.append(entry)
subject_df = pd.DataFrame(subject_rows)
console.log("Method:", method)
subject_corr = subject_df.corr(method=method)
console.log("Subject Correlations")
console.log(subject_corr)
names = {"pyro_3pl": "3PL", "pyro_2pl": "2PL", "pyro_1pl": "1PL"}
print(
subject_corr.applymap(lambda n: f"${to_precision(n, 3)}$")
.rename(columns=names, index=names)
.to_latex(column_format="l" + len(names) * "r", escape=False)
)
item_rows = []
for item_id in irt_3pl.example_ids:
entry = {"item_id": item_id}
for irt_model in models:
entry[irt_model.irt_model.value] = irt_model.example_stats[item_id].diff
item_rows.append(entry)
item_df = pd.DataFrame(item_rows)
item_corr = item_df.corr(method=method)
console.log("Item Correlations")
console.log(item_corr)
print(
item_corr.applymap(lambda n: f"${to_precision(n, 3)}$")
.rename(columns=names, index=names)
.to_latex(column_format="l" + len(names) * "r", escape=False)
)
def create_subject_df(*, dev_id_to_subject, dev_irt_params, dev_to_test, test_irt_params):
subject_rows = []
id_to_subject_stats = {}
for subject_id in dev_id_to_subject.keys():
subject = dev_id_to_subject[subject_id]
entry = {
"subject_id": subject_id,
"name": subject["name"],
"dev_em": subject["dev_em"],
"test_em": subject["test_em"],
"dev_skill": dev_irt_params.model_stats[subject_id].skill,
}
if subject_id in dev_to_test:
test_subject_id = dev_to_test[subject_id]
if test_subject_id in test_irt_params.model_stats:
entry["test_skill"] = test_irt_params.model_stats[test_subject_id].skill
subject_rows.append(entry)
id_to_subject_stats[subject_id] = entry
subject_df = pd.DataFrame(subject_rows)
subject_df["overfit"] = subject_df["test_em"] - subject_df["dev_em"]
return subject_df, id_to_subject_stats
def create_item_df(dev_irt_params):
item_rows = []
for item_id, item_stats in dev_irt_params.example_stats.items():
item_rows.append({"item_id": item_id, "disc": item_stats.disc, "diff": item_stats.diff})
all_item_df = | pd.DataFrame(item_rows) | pandas.DataFrame |
#%%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
import matplotlib.pyplot as plt
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
dir_code_methods = os.environ['dir_code_methods']
#%%
###############################################################################
# Dictionaries for latent variable models
###############################################################################
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_selfreport')
infile = open(filename,'rb')
dict_selfreport = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_random_ema')
infile = open(filename,'rb')
dict_random_ema = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_puffmarker')
infile = open(filename,'rb')
dict_puffmarker = pickle.load(infile)
infile.close()
#%%
###############################################################################
# Create a data frame with records of start & end of day timestamps
# for each participant-day
###############################################################################
# output of this script is the data frame data_day_limits
exec(open(os.path.join(os.path.realpath(dir_code_methods), 'setup-day-limits.py')).read())
data_reference = data_day_limits.loc[:,['participant_id','study_day']].groupby('participant_id').count().reset_index()
data_reference = data_reference.rename(columns = {'study_day':'max_study_day'})
# SANITY CHECK
#data_reference['max_study_day'].value_counts() # this is equal to 14
#%%
###############################################################################
# Knit together various data streams
###############################################################################
all_participant_id = data_hq_episodes['id'].drop_duplicates()
all_participant_id.index = np.array(range(0,len(all_participant_id.index)))
all_dict = {}
# %%
for i in range(0, len(all_participant_id)):
current_participant = all_participant_id[i]
current_dict = {}
for j in range(1, 15):
this_study_day = j
# Let's work with selfreport first ########################################
current_dict_selfreport = dict_selfreport[current_participant][j]
if len(current_dict_selfreport['hours_since_start_day'])==0:
tmp_selfreport = pd.DataFrame({})
else:
tmp_selfreport = pd.DataFrame({'assessment_type':'selfreport',
'hours_since_start_day': current_dict_selfreport['hours_since_start_day'],
'smoke': 'Yes',
'when_smoke': current_dict_selfreport['message'],
'delta': current_dict_selfreport['delta']
})
# Now let's work with Random EMA ##########################################
current_dict_random_ema = dict_random_ema[current_participant][j]
if len(current_dict_random_ema['hours_since_start_day'])==0:
tmp_random_ema = pd.DataFrame({})
else:
tmp_random_ema = pd.DataFrame({'assessment_type':'random_ema',
'hours_since_start_day': current_dict_random_ema['hours_since_start_day'],
'smoke': current_dict_random_ema['smoke'],
'when_smoke': current_dict_random_ema['when_smoke'],
'delta': current_dict_random_ema['delta']
})
# Now, let's concatanate ##################################################
frames = [tmp_selfreport, tmp_random_ema]
result = pd.concat(frames)
if len(result.index) > 0:
# important step to sort according to hours_since_start_day
result.sort_values(by=['hours_since_start_day'], inplace=True)
result['hours_since_start_day_shifted'] = result['hours_since_start_day'].shift(periods=+1)
result['hours_since_start_day_shifted'] = np.where(pd.isna(result['hours_since_start_day_shifted']), 0, result['hours_since_start_day_shifted'])
result['time_between'] = result['hours_since_start_day'] - result['hours_since_start_day_shifted']
# Let's create a time variable that depends on the value of 'smoke' #######
result['delta'] = np.where(np.logical_and(result['assessment_type']=='selfreport', result['when_smoke']==4), result['time_between']/2, result['delta'])
result['delta'] = np.where(np.logical_and(result['assessment_type']=='random_ema', result['when_smoke']==6), result['time_between']/2, result['delta'])
result['puff_time'] = np.where(result['smoke']=='Yes', result['hours_since_start_day']-result['delta'], np.nan)
# Rearrange columns #######################################################
#result = result.loc[:, ['assessment_type', 'smoke','hours_since_start_day_shifted','hours_since_start_day','time_between','puff_time']]
# Combine information into a dictionary ###################################
new_dict = {this_study_day: result}
current_dict.update(new_dict)
# Update participant ########################################################
all_dict.update({current_participant:current_dict})
#%%
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_knitted')
outfile = open(filename, 'wb')
pickle.dump(all_dict, outfile)
outfile.close()
###############################################################################
# Do checks on dict_knitted
###############################################################################
dict_knitted = all_dict
#%%
all_participant_id = data_hq_episodes['id'].drop_duplicates()
all_participant_id.index = np.array(range(0,len(all_participant_id.index)))
total_count_flagged = 0
total_count_rows = 0
#%%
for i in range(0, len(all_participant_id)):
current_participant = all_participant_id[i]
for j in range(1, 15):
this_study_day = j
dat = dict_knitted[current_participant][this_study_day]
if len(dat.index)>0:
dat['flag'] = np.where(dat['puff_time'] < dat['hours_since_start_day_shifted'], 1, 0)
dat['flag'] = np.where( | pd.isna(dat['puff_time']) | pandas.isna |
import pickle
import pandas as pd
from matplotlib import pyplot as plt
import sagemaker
import boto3
import s3fs
import json
import datetime
from tzlocal import get_localzone as tzlocal
import numpy as np
import boto3
from tqdm import tqdm_notebook as tqdm
import os
from os import path
import json
from deep_ar import series_to_jsonline
bmw_bucket_name = os.environ['BMW_DATA_BUCKET'] #'fog-bigdata-bmw-data'
data_bucket_name = os.environ['SANITIZED_DATA_BUCKET'] #'fog-datasets'
data_freq = os.environ['DATA_FREQUENCY'] #'5min'
s3_con = boto3.client('s3')
obj_list = s3_con.list_objects(Bucket=bmw_bucket_name,Prefix='metrics2/output')['Contents']
file_names = [key['Key'] for key in obj_list]
data = []
for file_name in tqdm(file_names):
# Select file with the correct extension
if not file_name.endswith('output.json'):
continue
file_str = s3_con.get_object(Bucket=bmw_bucket_name, Key=file_name).get('Body').read().decode('utf-8')
batch = eval(file_str)
# Aggregate the datapoints of all response codes into a single series
for code in ['response-code-200','response-code-4xx','response-code-5xx']:
if code not in batch.keys():
continue
data = data + batch[code]['Datapoints']
# Creates a pandas Dataframe from data
df = pd.DataFrame(data)
df.index = [i.replace(tzinfo=None) for i in pd.to_datetime(df.Timestamp)]
df = df.drop(columns=['Unit'])
df = df.groupby('Timestamp').max()
series = pd.Series(data=df.SampleCount.values, index=[i.replace(tzinfo=None) for i in pd.to_datetime(df.index)])
series = series.sort_index()
#series = series[series.index < datetime.datetime(2019,1,26,0,0,0)]
series = series.groupby([pd.Grouper(freq=data_freq)]).sum()
# Smooth each datapoint with a running mean over n_backsteps samples (window width n_backsteps * data_freq) to remove anomalies and obtain a clean training set
n_backsteps = 5
conv = np.hstack([np.ones(n_backsteps)/n_backsteps,np.zeros(n_backsteps-1)])
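# e.g. with n_backsteps = 5 the kernel is [0.2, 0.2, 0.2, 0.2, 0.2, 0.0, 0.0, 0.0, 0.0],
# i.e. a 5-sample averaging window zero-padded for the convolution below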
pad_vals = np.pad(series.values,n_backsteps-1,mode='edge')
series = pd.Series(data=np.convolve(pad_vals,conv,mode='valid'),index=series.index)
# Scale down a part of the data (that was incorrectly scaled, for unknown reasons)
series[np.logical_and(series.index >= pd.Timestamp(2019,1,26),series.index < pd.Timestamp(2019,1,31,8,55))] /= 2
test_idx = np.logical_and(series.index > datetime.datetime(2019,1,28,0,0,0), series.index <= datetime.datetime(2019,2,4,0,0,0))
train_idx = series.index <= datetime.datetime(2019,1,28,0,0,0)
# Upload RCF-shaped data
print("Uploading RCF-shaped data")
prefix = 'rcf'
s3_data_path = "{}/{}/data".format(data_bucket_name, prefix)
s3filesystem = s3fs.S3FileSystem()
with s3filesystem.open(s3_data_path + "/train/data.csv", 'w') as fp:
fp.write(series[train_idx].to_csv())
with s3filesystem.open(s3_data_path + "/test/data.csv", 'w') as fp:
fp.write(series[test_idx].to_csv())
# Upload DeepAR-shaped data
print("Uploading DeepAR-shaped data")
# Create feature series of holidays
end_of_holiday = datetime.date(2019, 1, 7)
holidays_data = [1 if time < | pd.Timestamp(end_of_holiday,tz=None) | pandas.Timestamp |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilitites to preprocess SEER and SUPPORT datasets.
This module has utility functions to load and preprocess the SEER and SUPPORT
datasets for learning and evaluation of Survival Analysis methods. Preprocessing
involves standard scaling of the features, excluding the protected attribute,
imputing the missing values (for SEER) and removing outliers.
Not designed to be called directly, would be called when running a function from
fair_survival_analysis.fair_survival_analysis
"""
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
def preprocess_support(data, prot='sex', fair_strategy=None):
"""Function to preprocess a loaded SUPPORT dataset.
Imputation and standard rescaling is performed.
Args:
data:
pandas dataframe of the loaded SUPPORT csv.
prot:
a string to identify the name of the column containing the protected
attributes.
fair_strategy:
Fairness strategy to be incorporated while preprocessing. Can be one of
None, "unawaereness" or "coupled".
Returns:
a tuple of the shape of (x, t, e, a). Where 'x' are the features, t is the
event (or censoring) times, e is the censoring indicator and a is a vector
of the protected attributes.
"""
x1 = data[[
'age', 'num.co', 'meanbp', 'wblc', 'hrt', 'resp', 'temp', 'pafi', 'alb',
'bili', 'crea', 'sod', 'ph', 'glucose', 'bun', 'urine', 'adlp', 'adls'
]]
catfeats = ['sex', 'dzgroup', 'dzclass', 'income', 'race', 'ca']
if fair_strategy in ['unawareness', 'coupled']:
print('Note: For ' + fair_strategy, ' the protected feature is removed...')
catfeats = set(catfeats)
catfeats = list(catfeats - set([prot]))
x2 = | pd.get_dummies(data[catfeats]) | pandas.get_dummies |
#!/usr/bin/env python
# coding: utf-8
# %%
import os
import re
from numpy import random
import pandas as pd
import networkx as nx
import numpy as np
from pathlib import Path
from datetime import datetime
from .utils import (keydefaultdict, plot_pandas_categorical_features, mkdir_ifnotexist, load_pickle, save_pickle)
from .mlutils import bucketize_into_bins, get_discrete_labels
from .utils import attrdict
from scipy.io.arff import loadarff
pd.options.display.max_columns = 50
# %%
PACKAGE_DATA_FOLDER = os.path.abspath(os.path.join(Path(__file__).parent, 'data'))
# KAGGLE Message
KAGGLE_LINK = {
'lendingclub': 'wordsforthewise/lending-club',
}
def kaggle_dataset(folder):
mkdir_ifnotexist(folder)
if len(os.listdir(folder)) > 0:
return True
else:
# Get the dataset short name (folder name)
dataset = os.path.basename(os.path.dirname(os.path.join(folder, '.placeholder')))
if dataset in KAGGLE_LINK:
dataset_link = KAGGLE_LINK[dataset]
else:
raise ValueError(f'No link for this Kaggle dataset ({dataset}).')
# Print the message to download
print(f"""
This dataset is not directly available in this library. It will be downloaded using kaggle.
Please make sure that ~/.kaggle/kaggle.json is available and correctly configured.
To make the dataset available through this library please download it using:
kaggle datasets download --unzip -d {dataset_link} -p {folder}
- To install kaggle use `pip install kaggle`
- If you are behind a proxy you must set it using: `kaggle config set -n proxy -v http://yourproxy.com:port`
- If you wish to download the dataset to another location other than the library location please pass an appropriate `folder` argument.
""")
return False
# %%
def load_lendingclub(
folder=os.path.join(PACKAGE_DATA_FOLDER, 'lendingclub'),
load_preprocessed=True,
cleaning_type='ax',
random_state=2020,
):
random_state = np.random.RandomState(random_state)
def target_clean(df):
# Non-completed loans
df = df[df['loan_status'] != 'Current']
df = df[df['loan_status'] != 'In Grace Period']
# The taget must not be NaN
df = df.dropna(how='any', subset=['loan_status'])
# Recode targets
df['loan_status'] = df.loan_status.map({
'Fully Paid': 0,
'Charged Off': 1,
'Late (31-120 days)': 1,
'Late (16-30 days)': 1,
'Does not meet the credit policy. Status:Fully Paid': 0,
'Does not meet the credit policy. Status:Charged Off': 1,
'Default': 1
})
return df.reset_index(drop=True).copy()
def basic_cleaning(df):
# Drop columns with NaN more than 90%
drop_cols = df.columns[df.isnull().mean() > 0.9]
df = df.drop(drop_cols, axis=1)
# Drop records with more than 50% of NaN features
df = df[(df.isnull().mean(axis=1) < .5)]
df['verification_status'] = df.verification_status.map({'Verified': 0, 'Source Verified': 1, 'Not Verified': 2})
df['debt_settlement_flag'] = df.debt_settlement_flag.map({'N': 0, 'Y': 1})
df['initial_list_status'] = df.initial_list_status.map({'w': 0, 'f': 1})
df['application_type'] = df.application_type.map({'Individual': 0, 'Joint App': 1})
df['hardship_flag'] = df.hardship_flag.map({'N': 0, 'Y': 1})
df['pymnt_plan'] = df.pymnt_plan.map({'n': 0, 'y': 1})
df['disbursement_method'] = df.disbursement_method.map({'Cash': 0, 'DirectPay': 1})
df['term'] = df.term.map({' 36 months': 0, ' 60 months': 1})
df['grade'] = df['grade'].map({v: i for i, v in enumerate(np.sort(df['grade'].unique()))})
df['sub_grade'] = df['sub_grade'].map({v: i for i, v in enumerate(np.sort(df['sub_grade'].unique()))})
df['emp_length'] = df['emp_length'].apply(lambda x: x.replace('year', '').replace('s', '').replace('+', '').
replace('< 1', '0') if isinstance(x, str) else '-1').astype(int)
df['earliest_cr_line'] = df['earliest_cr_line'].apply(lambda x: int(x[-4:]))
df['issue_d'] = pd.to_datetime(df['issue_d'])
# Get rid of few customers with no home
df = df[df['home_ownership'].apply(lambda x: x in ['OWN', 'RENT', 'MORTGAGE'])]
df['home_ownership'] = df.home_ownership.map({'MORTGAGE': 0, 'OWN': 1, 'RENT': 2})
return df.reset_index(drop=True).copy()
def ax_cleaning(df):
COLUMNS = ['loan_status', 'issue_d'] + sorted([
'loan_amnt', 'term', 'int_rate', 'installment', 'grade', 'sub_grade', 'emp_length', 'home_ownership',
'annual_inc', 'verification_status', 'dti', 'earliest_cr_line', 'open_acc', 'pub_rec', 'revol_bal',
'revol_util', 'total_acc', 'application_type', 'mort_acc', 'pub_rec_bankruptcies'
])
feature = 'dti'
filtercol = (df[feature] < 0)
df[feature][filtercol] = random_state.normal(24.5, .5, size=int((filtercol).sum()))
filtercol = (df[feature].isnull())
df[feature][filtercol] = -1
feature = 'pub_rec_bankruptcies'
filtercol = (df[feature].isnull())
df[feature][filtercol] = df[feature].median()
feature = 'mort_acc'
filtercol = (df[feature].isnull())
df[feature][filtercol] = 52
feature = 'revol_util'
filtercol = (df[feature].isnull())
df[feature][filtercol] = random_state.normal(82.5, 3, size=int((filtercol).sum()))
return df[COLUMNS].reset_index(drop=True).copy()
if kaggle_dataset(folder) is False:
return None
PREPROCESSED_FILENAME = os.path.join(folder, f'preprocessed_{cleaning_type}.pkl')
if load_preprocessed and os.path.exists(PREPROCESSED_FILENAME):
return load_pickle(PREPROCESSED_FILENAME)
else:
df = pd.read_csv(os.path.join(folder, 'accepted_2007_to_2018q4.csv/accepted_2007_to_2018Q4.csv'))
df = target_clean(df)
df = basic_cleaning(df)
if cleaning_type == 'ax':
df = ax_cleaning(df)
else:
raise ValueError('Invalid cleaning type specified.')
dataset = attrdict(data=df.reset_index(drop=True),
class_names=['Good', 'Bad'],
target_name='loan_status',
split_date='issue_d')
save_pickle(dataset, PREPROCESSED_FILENAME)
return dataset
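# Example usage (sketch): assuming the Kaggle CSV has already been downloaded
# into the package data folder, a typical call looks like the lines below;
# kept as comments so the module has no import-time side effects.
# lc = load_lendingclub(cleaning_type='ax')
# if lc is not None:
#     X = lc.data.drop(columns=[lc.target_name, lc.split_date])
#     y = lc.data[lc.target_name]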
# %%
# def load_toy_financial():
# """
# ____ _ _ _ ____ _ _ ____ ____ ___ ____ _ _
# |___ | |\ | |__| |\ | | |___ | | | \_/
# | | | \| | | | \| |___ |___ | |__| |
# """
# FEATURES = W, D, H, N = ['Warning', 'Devaluation', 'House Crash', 'Negative News']
# CLASSES = C, E = ['Drop Consumer Confidence', 'External Problem']
# data = pd.DataFrame(
# [
# [0, 0, 0, 0, 0, 0],
# [0, 0, 1, 0, 1, 1],
# [0, 1, 0, 0, 1, 1],
# [0, 1, 1, 0, 1, 1],
# [1, 0, 0, 0, 0, 0],
# [1, 0, 1, 0, 0, 0],
# [1, 1, 0, 0, 1, 1],
# [1, 1, 1, 0, 1, 1],
# [0, 0, 0, 1, 1, 0],
# [0, 0, 1, 1, 1, 0],
# [0, 1, 0, 1, 1, 0],
# [0, 1, 1, 1, 1, 0],
# [1, 0, 0, 1, 1, 1],
# [1, 0, 1, 1, 1, 1],
# [1, 1, 0, 1, 1, 1],
# [1, 1, 1, 1, 1, 1],
# ],
# columns=FEATURES + CLASSES
# )
# for col in data:
# data[col] = data[col].apply(lambda x: '+' if x == 1 else '-')
# # Create Bayesian Network
# net = nx.DiGraph()
# for node in FEATURES + CLASSES:
# net.add_node(node)
# for a, b in [(E, C), (E, W), (C, D), (C, H), (C, N), (D, H)]:
# net.add_edge(a, b)
# return data, CLASSES, net
# %%
def load_adult(folder=os.path.join(PACKAGE_DATA_FOLDER, 'adult')):
"""
____ ___ _ _ _ ___
|__| | \ | | | |
| | |__/ |__| |___ |
"""
with open(os.path.join(folder, "adult.names")) as f:
names = [line.split(':')[0] for line in f.readlines()[-15:] if ":" in line]
df = pd.read_csv(os.path.join(folder, "adult.data"), header=None, names=names + ['class'], index_col=False)
return df
# %%
def load_heloc(folder=os.path.join(PACKAGE_DATA_FOLDER, 'heloc'), random_state=2020, cleaning_type='default_clean'):
""""
_ _ ____ _ ____ ____
|__| |___ | | | |
| | |___ |___ |__| |___
Description of the dataset can be found here: http://dukedatasciencefico.cs.duke.edu/models/
"""
random_state = np.random.RandomState(random_state)
def clean_heloc(data):
data['RiskPerformance'] = data['RiskPerformance'].apply(lambda x: 1 if 'Bad' in x else 0)
all_special_mask = np.all(
((data.drop(columns=['RiskPerformance']) <= -7) & (data.drop(columns=['RiskPerformance']) >= -9)).values,
axis=1)
data = data[~all_special_mask]
data = data[np.sort(data.columns.values)]
data['ExternalRiskEstimate'][data['ExternalRiskEstimate'] < 0] = data['ExternalRiskEstimate'].median()
data['MSinceMostRecentDelq'][data['MSinceMostRecentDelq'] < 0] = \
random_state.uniform(30, 80, int((data['MSinceMostRecentDelq'] < 0).sum())).astype(int)
data['MSinceMostRecentInqexcl7days'][data['MSinceMostRecentInqexcl7days'] == -7] = -1
data['MSinceMostRecentInqexcl7days'][data['MSinceMostRecentInqexcl7days'] == -8] = 25
data['MSinceOldestTradeOpen'][data['MSinceOldestTradeOpen'] == -8] = random_state.uniform(
145, 165, int((data['MSinceOldestTradeOpen'] == -8).sum())).astype(int)
data['NetFractionInstallBurden'][data['NetFractionInstallBurden'] == -8] = random_state.normal(
data['NetFractionInstallBurden'].median(), 7.5, size=int(
(data['NetFractionInstallBurden'] == -8).sum())).astype(int)
data['NetFractionRevolvingBurden'][data['NetFractionRevolvingBurden'] == -8] = random_state.normal(
75, 5, size=int((data['NetFractionRevolvingBurden'] == -8).sum())).astype(int)
data['NumInstallTradesWBalance'][data['NumInstallTradesWBalance'] == -8] = 0
data['NumRevolvingTradesWBalance'][data['NumRevolvingTradesWBalance'] == -8] = random_state.normal(
13, 1, size=int((data['NumRevolvingTradesWBalance'] == -8).sum())).astype(int)
data['NumBank2NatlTradesWHighUtilization'][data['NumBank2NatlTradesWHighUtilization'] == -8] = 20
data['PercentTradesWBalance'][data['PercentTradesWBalance'] == -8] = data['PercentTradesWBalance'].median()
return data.reset_index(drop=True).copy()
df = pd.read_csv(os.path.join(folder, 'HELOC.csv'))
if 'default_clean' in cleaning_type:
df = clean_heloc(df)
return attrdict(
data=df,
target_name='RiskPerformance',
class_names=['Good', 'Bad'],
)
# %%
def load_fico(main_data_path,
folder='fico',
order='natural',
original=False,
BINS_STRATEGY=None,
N_BINS=None,
NB_BINS=None):
"""
____ _ ____ ____
|___ | | | |
| | |___ |__|
"""
if original:
df = pd.read_csv(os.path.join(main_data_path, folder, 'heloc_dataset_v1.csv'))
label = 'RiskPerformance'
df_string = df.copy()
for c, col in enumerate(df_string.columns.values):
if df_string.dtypes[col] != 'object' and col != label: # Do not bucketize categorical feature
df_string[col], col_labels = bucketize_into_bins(df_string[col],
N_BINS,
retlabels=True,
strategy=BINS_STRATEGY)
df_string[col] = df_string[col].apply(lambda x: col_labels[x])
df_string[label] = pd.get_dummies(df_string[label])[['Bad']]
decoder = None
else:
df = pd.read_csv(os.path.join(main_data_path, folder, 'WOE_Rud_data.csv'))
keys = pd.read_csv(os.path.join(main_data_path, folder, 'keys.csv'))
y = pd.read_csv(os.path.join(main_data_path, folder, 'y_data.csv'))
label = 'RiskPerformance'
df.insert(loc=0, column=label, value=pd.get_dummies(y[label])[['Bad']])
df_string = df.copy()
if order == 'natural':
decoder = {}
decoder[label] = {i: val for i, val in enumerate(['0', '1'])}
for col in keys['feature'].unique():
# key = keys['sort_val'][keys['feature']==col]
vals = round(keys['value'][keys['feature'] == col], 3).unique().astype(str)
decoder[col] = {i: val for i, val in enumerate(list(vals))}
df_string = round(df_string, 3).astype(str)
else:
df_string = df_string - df_string.min()
df_string = df_string.astype(str)
decoder = None
return df_string, label, decoder
def load_water(main_data_path, folder='water_quality', complexity='hard'):
"""
_ _ _ ____ ___ ____ ____ ____ _ _ ____ _ _ ___ _ _
| | | |__| | |___ |__/ | | | | |__| | | | \_/
|_|_| | | | |___ | \ |_\| |__| | | |___ | | |
"""
raw_data = loadarff(os.path.join(main_data_path, folder, 'water-quality-nom.arff'))
df = pd.DataFrame(raw_data[0])
for f in df.columns.values:
df[f] = df[f].astype(int)
labels = df.columns.values[-14:]
for col in labels:
df[col] = df[col].apply(lambda x: get_discrete_labels(2, 'binary')[x])
def bucketize_and_apply_labels(col, *args, **kargs):
df[col], col_values, col_labels = bucketize_into_bins(df[col].values,
*args,
retlabels=True,
retbins=True,
strategy='uniform',
label_type='generic',
**kargs)
df[col] = df[col].apply(lambda x: col_labels[x])
print(col, col_values)
if complexity == 'hard':
for col in ['std_temp', 'std_pH', 'o2', 'o2sat', 'sio2']:
bucketize_and_apply_labels(col, 3)
for col in ['conduct', 'hardness']:
df[col] = (df[col] >= 2) * 1 + (df[col] >= 3) * 1 + (df[col] >= 4) * 1 + (df[col] >= 5) * 1
bucketize_and_apply_labels(col, 5)
for col in ['no3']:
df[col] = (df[col] >= 1) * 1 + (df[col] >= 2) * 1 + (df[col] >= 3) * 1
bucketize_and_apply_labels(col, 4)
for col in ['co2', 'no2', 'nh4', 'po4', 'cl', 'kmno4', 'k2cr2o7', 'bod']:
df[col] = (df[col] >= 1) * 1
bucketize_and_apply_labels(col, 2)
elif complexity == 'medium':
for col in ['std_temp', 'std_pH', 'o2', 'o2sat']:
bucketize_and_apply_labels(col, 3)
for col in ['conduct']:
df[col] = (df[col] >= 2) * 1 + (df[col] >= 4) * 1
bucketize_and_apply_labels(col, 3)
for col in ['hardness']:
df[col] = (df[col] >= 3) * 1 + (df[col] >= 5) * 1
bucketize_and_apply_labels(col, 3)
for col in ['no3', 'sio2']:
df[col] = (df[col] >= 2) * 1
bucketize_and_apply_labels(col, 2)
for col in ['co2', 'no2', 'nh4', 'po4', 'cl', 'kmno4', 'k2cr2o7', 'bod']:
df[col] = (df[col] >= 1) * 1
bucketize_and_apply_labels(col, 2)
elif complexity == 'easy':
for col in ['std_temp', 'std_pH', 'o2', 'o2sat']:
bucketize_and_apply_labels(col, 2)
for col in ['conduct']:
df[col] = (df[col] >= 4) * 1 # + (df[col] >= 4) * 1
bucketize_and_apply_labels(col, 2)
for col in ['hardness']:
df[col] = (df[col] >= 5) * 1 # + (df[col] >= 5) * 1
bucketize_and_apply_labels(col, 2)
for col in ['no3', 'sio2']:
df[col] = (df[col] >= 2) * 1
bucketize_and_apply_labels(col, 2)
for col in ['co2', 'no2', 'nh4', 'po4', 'cl', 'kmno4', 'k2cr2o7', 'bod']:
df[col] = (df[col] >= 1) * 1
bucketize_and_apply_labels(col, 2)
return df, labels
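# Example usage (sketch): `main_data_path` is assumed to contain a
# 'water_quality' folder with 'water-quality-nom.arff'.
# water_df, water_labels = load_water(main_data_path, complexity='medium')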
# %%
def load_california(main_data_path, BINS, BINS_STRATEGY, NB_BINS, folder="california"):
"""
____ ____ _ _ ____ ____ ____ _ _ _ ____
| |__| | | |___ | | |__/ |\ | | |__|
|___ | | |___ | | |__| | \ | \| | | |
"""
df = pd.read_csv(os.path.join(main_data_path, folder, 'housing_with_feature_engineering.csv'))
LABEL = 'median_house_value'
for c, col in enumerate(df.columns.values):
if df.dtypes[col] != 'object' and col != LABEL: # Do not bucketize categorical feature
df[col], col_labels = bucketize_into_bins(
df[col],
BINS[c], # args.NB_BINS,
retlabels=True,
strategy=BINS_STRATEGY,
label_type='generic')
df[col] = df[col].apply(lambda x: col_labels[x])
df[LABEL], col_labels = bucketize_into_bins(df[LABEL], NB_BINS, retlabels=True, label_type='generic')
df[LABEL] = df[LABEL].apply(lambda x: col_labels[x])
LABELS = [LABEL]
return df, LABELS
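# Example usage (sketch): BINS is assumed to supply one bin count per csv
# column; the values below are illustrative, not taken from the original code.
# cal_df, cal_labels = load_california(main_data_path, BINS=[3] * 16,
#                                      BINS_STRATEGY='quantile', NB_BINS=5)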
# %%
def load_earthquake_police(categorical=True):
"""
____ ____ ____ ___ _ _ ____ _ _ ____ _ _ ____
|___ |__| |__/ | |__| | | | | |__| |_/ |___
|___ | | | \ | | | |_\| |__| | | | \_ |___
Earthquake : 12 samples, 3 features, 2 classifications
"""
FEATURES = V, M, P = ['Feel Vibration', 'Mary Calls', 'Police Calls']
CLASSES = A, E = ['Alarm', 'Earthquake']
# Create dataset
data = pd.DataFrame(
[
#V, M, P->A, E
[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[1, 0, 0, 0, 0],
[1, 0, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1],
[2, 0, 0, 0, 2],
[2, 0, 1, 1, 2],
[2, 1, 0, 1, 2],
[2, 1, 1, 1, 2],
],
columns=FEATURES + CLASSES)
# Create Bayesian network
net = nx.DiGraph()
for node in FEATURES + CLASSES:
net.add_node(node)
for a, b in [(A, M), (A, P), (E, V), (E, A)]:
net.add_edge(a, b)
if categorical:
for col in [V, E]:
data[col] = data[col].apply(lambda x: 'Weak' if x == 1 else 'Strong' if x == 2 else 'No')
for col in [M, P, A]:
data[col] = data[col].apply(lambda x: 'Yes' if x == 1 else 'No')
return data, FEATURES, CLASSES, net
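# Example usage (sketch):
# eq_data, eq_features, eq_classes, eq_net = load_earthquake_police(categorical=True)
# eq_data[eq_classes].head()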
def load_amphibians(main_data_path, folder='amphibians'):
"""
____ _ _ ___ _ _ _ ___ _ ____ _ _ ____
|__| |\/| |__] |__| | |__] | |__| |\ | [__
| | | | | | | | |__] | | | | \| ___]
"""
df = pd.read_csv(os.path.join(main_data_path, folder, 'dataset.csv'), sep=';', skiprows=1, index_col='ID')
labels = df.columns.values[-7:]
# display(df)
for col in ['SR']:
df[col], col_labels = bucketize_into_bins(df[col].values,
strategy='quantile',
nb_bins=3,
retlabels=True,
label_type='generic')
df[col] = df[col].apply(lambda x: col_labels[x])
for col in ['NR']:
df[col], col_labels = bucketize_into_bins(df[col].values,
strategy='uniform',
nb_bins=3,
retlabels=True,
label_type='generic')
df[col] = df[col].apply(lambda x: col_labels[x])
for col in labels:
df[col] = df[col].apply(lambda x: get_discrete_labels(2, 'binary')[x])
df = df.apply(lambda col: col.apply(lambda x: str(x)))
return df, labels
def load_student(main_data_path,
subject='portuguese',
one_hot_ordinal=True,
nb_bins_scores=3,
nb_bins_numeric=4,
nb_bins_classes=5,
folder='student'):
"""
____ ___ _ _ ___ ____ _ _ ___
[__ | | | | \ |___ |\ | |
___] | |__| |__/ |___ | \| |
Note: these are actually two datasets (depending on the subject):
- por(tuguese) (~600)
- mat(h) (~300)
-> Most of the students in math are also in portuguese
"""
df = pd.read_csv(
os.path.join(main_data_path, folder, f'student-{subject[:3]}.csv'),
sep=';',
)
education = {0: 'none', 1: 'primary <=4th', 2: 'primary >4th', 3: 'secondary', 4: 'higher'}
travel_time = {1: '<15 min', 2: '15-30 min', 3: '30-60 min', 4: '>1 hour'}
study_time = {1: '<2 h', 2: '2-5 h', 3: '5-10 h', 4: '>10 h'}
for col in ['Fedu', 'Medu']:
if one_hot_ordinal:
for v, name in list(education.items())[1:]:
df[col + '>=' + name] = df[col].apply(lambda x: 'yes' if x >= v else 'no')
del df[col]
else:
df[col] = df[col].apply(lambda x: education[x])
for col in ['Mjob', 'Fjob']:
df[col] = df[col].apply(lambda x: x.replace('_', ' '))
df['age'] = df['age'].apply(lambda x: (str(x) if x < 20 else '>=20') + ' yro')
df['Pstatus'] = df['Pstatus'].apply(lambda x: {'T': 'together', 'A': 'apart'}[x])
df['address'] = df['address'].apply(lambda x: {'R': 'rural', 'U': 'urban'}[x])
df['famsize'] = df['famsize'].apply(lambda x: {'GT3': '>=4', 'LE3': '<4'}[x])
df['traveltime'] = df['traveltime'].apply(lambda x: travel_time[x])
df['studytime'] = df['studytime'].apply(lambda x: study_time[x])
scores = ['famrel', 'freetime', 'goout', 'Dalc', 'Walc', 'health']
numeric = ['absences']
classes = ['G1', 'G2', 'G3']
# Score-like features
nb_bins_scores = min(5, nb_bins_scores)
for col in scores:
df[col], col_labels = bucketize_into_bins(df[col].values,
strategy='uniform',
nb_bins=nb_bins_scores,
retlabels=True,
label_type='generic')
df[col] = df[col].apply(lambda x: col_labels[x])
# Numeric
for col in numeric:
df[col], col_labels = bucketize_into_bins(df[col].values,
strategy='quantile',
nb_bins=nb_bins_numeric,
retlabels=True,
label_type='generic')
df[col] = df[col].apply(lambda x: col_labels[x])
# Classes
for col in classes:
df[col], col_labels = bucketize_into_bins(df[col].values,
strategy='quantile',
nb_bins=nb_bins_classes,
retlabels=True,
label_type='generic')
df[col] = df[col].apply(lambda x: col_labels[x])
return df, classes
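# Example usage (sketch): load the math subject with coarser grade bins; the
# argument values are illustrative only.
# student_df, grade_cols = load_student(main_data_path, subject='math', nb_bins_classes=3)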
# %%
def load_university_admission():
""""
_ _ _ _ _ _ _ ____ ____ ____ _ ___ _ _ ____ ___ _ _ _ ____ ____ _ ____ _ _
| | |\ | | | | |___ |__/ [__ | | \_/ |__| | \ |\/| | [__ [__ | | | |\ |
|__| | \| | \/ |___ | \ ___] | | | | | |__/ | | | ___] ___] | |__| | \|
"""
return pd.DataFrame(
[[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 1, 0, 0], [0, 0, 1, 1, 0], [0, 1, 0, 0, 0], [0, 1, 0, 1, 0],
[0, 1, 1, 0, 0], [0, 1, 1, 1, 1], [1, 0, 0, 0, 0], [1, 0, 0, 1, 1], [1, 0, 1, 0, 0], [1, 0, 1, 1, 1],
[1, 1, 0, 0, 0], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1], [1, 1, 1, 1, 1]],
columns=['W', 'F', 'E', 'G', 'class'])
# %%
def load_votes_dataset(main_data_path, folder='house-votes', mock_features_names=False, shorten_names=True):
"""
_ _ ____ ___ ____ ____
| | | | | |___ [__
\/ |__| | |___ ___]
(UCI ML Repo)
"""
# Encoder
VOTES_ENCODER = {'y': 'yes', 'n': 'no'}
# Parsing features/class names and values
with open(os.path.join(main_data_path, folder, 'house-votes-84.names'), 'r') as file:
texts = [
s.split('.')[1].strip() for s in file.read().split('Attribute Information:')[1].split(
'8. Missing Attribute Values:')[0].strip().split('\n')
]
features_values = [s.split('(')[1].split(')')[0].split(',') for s in texts]
features_values = [[s.strip() for s in sub] for sub in features_values]
# class_values = features_values[0]
features_values = features_values[1:]
features_names = [s.split(':')[0].strip() for s in texts][1:]
# Load into a pandas DataFrame
data = pd.read_csv(os.path.join(main_data_path, folder, 'house-votes-84.data'),
names=['party'] + features_names,
header=None)
if mock_features_names:
data.columns = ['class'] + [f'F{i}' for i in range(len(data.columns) - 1)]
data = data.apply(lambda s: s.apply(lambda x: VOTES_ENCODER[x] if x in VOTES_ENCODER else x))
if shorten_names:
data.rename(
columns={
# "handicapped-infants": "handic",
# "water-project-cost-sharing": "water",
# "adoption-of-the-budget-resolution": "budget",
# "physician-fee-freeze": "physic",
# "el-salvador-aid": "elsal",
# "religious-groups-in-schools": "relig",
# "anti-satellite-test-ban": "satel",
# "aid-to-nicaraguan-contras": "nicar",
# "mx-missile": "missil",
# "immigration": "immig",
# "synfuels-corporation-cutback": "synfue",
# "education-spending": "edu",
# "superfund-right-to-sue": "sue",
# "crime": "crime",
# "duty-free-exports": "expor",
# "export-administration-act-south-africa": "safr",
"handicapped-infants": "handicapped\ninfants",
"water-project-cost-sharing": "water\nproject",
"adoption-of-the-budget-resolution": "budget\nresolution",
"physician-fee-freeze": "physician\nfee freeze",
"el-salvador-aid": "el salvador\naid",
"religious-groups-in-schools": "religion\nin school",
"anti-satellite-test-ban": "anti-satellite\ntest ban",
"aid-to-nicaraguan-contras": "nicaraguan\naid",
"mx-missile": "mx\nmissile",
"immigration": "immigration",
"synfuels-corporation-cutback": "synfuels\ncutback",
"education-spending": "education\nspending",
"superfund-right-to-sue": "superfund\nsuing",
"crime": "crime",
"duty-free-exports": "duty free\nexports",
"export-administration-act-south-africa": "south africa\nexport",
},
inplace=True)
return data, 'party'
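# Example usage (sketch):
# votes_df, votes_target = load_votes_dataset(main_data_path, shorten_names=False)
# votes_df[votes_target].value_counts()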
# %%
def load_parole_dataset(main_data_path, folder='parole', CATEGORICAL=False, bins=[4, 4, 5]):
"""
___ ____ ____ ____ _ ____
|__] |__| |__/ | | | |___
| | | | \ |__| |___ |___
Load the Parole dataset
CATEGORICAL : bool
True to bucketize the numerical features and decode the categorical codes into readable labels
bins : list/tuple
Size of the bins of the numerical features, in order:
- time.served
- max.sentence
- age
Source: National Corrections Reporting Program, 2004 (ICPSR 26521) https://www.icpsr.umich.edu/icpsrweb/NACJD/studies/26521
Note:
- Naive Bayes Classifier Binarized doesn't work well, we need state and crime to be categorical
"""
# Decoder
parole_features_decoder_dict = {
'male': {
0: 'Female',
1: 'Male'
},
'race': {
0: 'White',
1: 'Non-white'
},
'age': None,
'state': {
0: 'Other state',
1: 'Kentucky',
2: 'Louisiana',
3: 'Virginia',
},
'time.served': None,
'max.sentence': None,
'multiple.offenses': {
0: 'single-offender',
1: 'multi-offender'
},
'crime': {
0: 'other-crime',
1: 'larceny',
2: 'drug',
3: 'driving',
},
'violator': {
0: 'No',
1: 'Yes'
}
}
# Load
df = pd.read_csv(os.path.join(main_data_path, folder, 'parole.csv'))
# Convert to categorical
if CATEGORICAL:
# Create categories for continuous data
df['race'] = df['race'] - 1
df['state'] = df['state'] - 1
df['crime'] = df['crime'] - 1
df["time.served"], time_bins = pd.cut(df["time.served"], bins=bins[0], labels=False, retbins=True)
df["max.sentence"], sentence_bins = pd.cut(df["max.sentence"], bins=bins[1], labels=False, retbins=True)
df["age"], age_bins = pd.cut(df["age"], bins=bins[2], labels=False, retbins=True)
parole_features_decoder_dict['age'] = {
i: f'{int(round(abs(age_bins[i]), 0))}-{int(round(age_bins[i+1], 0))} yr.'
for i in range(0,
len(age_bins) - 1)
}
parole_features_decoder_dict['max.sentence'] = {
i: f'{int(round(abs(sentence_bins[i]), 0))}-{int(round(sentence_bins[i+1], 0))} yr.'
for i in range(0,
len(sentence_bins) - 1)
}
parole_features_decoder_dict['time.served'] = {
i: f'{int(round(abs(time_bins[i]), 0))}-{int(round(time_bins[i+1], 0))} yr.'
for i in range(0,
len(time_bins) - 1)
}
for col in df.columns.values:
df[col] = df[col].apply(lambda val: parole_features_decoder_dict[col][val])
df = df.rename(
columns={
'time.served': 'time\nserved',
'max.sentence': 'max\nsentence',
'violator': 'parole\nviolator',
'multiple.offenses': 'multiple\noffenses',
'male': 'sex'
})
return df, 'parole\nviolator'
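# Example usage (sketch): bucketize the numeric columns (4/4/5 bins) and decode
# the categorical codes into readable labels.
# parole_df, parole_target = load_parole_dataset(main_data_path, CATEGORICAL=True)
# parole_df[parole_target].value_counts()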
# %%
""""
___ _ ____ _ _
|__] | |__| \_/
| |___ | | |
"""
def load_play_bcc():
"""
Play : 8 samples, 3 features, 2 classifications
"""
FEATURES = ['Temperature', 'Humidity', 'Pressure']
CLASSES = ['Rainy', 'Play']
data = pd.DataFrame(
[
#T,H,P R,P
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 0],
[1, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 0, 1, 0],
[1, 1, 1, 0, 1],
],
columns=FEATURES + CLASSES)
return data, data[FEATURES], data[CLASSES]
def load_play_bbcc(only_play=False, categorical=True):
"""
Play : 16 samples, 4 features, 2 classifications
"""
FEATURES = ['Temperature', 'Humidity', 'Pressure', 'Windy']
CLASSES = ['Rainy', 'Play']
data = pd.DataFrame(
[
#T,H,P,W R,P
[0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 1],
[1, 1, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1],
[0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 0],
[1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 0, 1],
[1, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 0, 1],
],
columns=FEATURES + CLASSES)
if only_play:
CLASSES = CLASSES[1:]
if categorical:
for col in ['Temperature', 'Humidity', 'Pressure']:
data[col] = data[col].apply(lambda x: 'High' if x == 1 else 'Low')
for col in ['Windy', 'Rainy', 'Play']:
data[col] = data[col].apply(lambda x: 'Yes' if x == 1 else 'No')
return data, FEATURES, CLASSES
def load_play_bbcc2(only_play=False, categorical=True):
"""
Play : 18 samples, 3 features, 2 classifications
"""
FEATURES = T, P, W = ['Temperature', 'Pressure', 'Windy']
CLASSES = R, Outcome = ['Rainy', 'Play']
data = pd.DataFrame(
[
#T,P,W R,P
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 2, 1, 0],
[0, 1, 0, 0, 1],
[0, 1, 1, 0, 0],
[0, 1, 2, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 1, 1, 0],
[1, 0, 2, 1, 0],
[1, 1, 0, 0, 1],
[1, 1, 1, 0, 1],
[1, 1, 2, 0, 1],
[2, 0, 0, 1, 0],
[2, 0, 1, 1, 0],
[2, 0, 2, 1, 0],
[2, 1, 0, 1, 0],
[2, 1, 1, 1, 0],
[2, 1, 2, 1, 0],
],
columns=FEATURES + CLASSES)
# Create Bayesian network
net = nx.DiGraph()
for node in FEATURES + CLASSES:
net.add_node(node)
for a, b in [(Outcome, W), (Outcome, T), (Outcome, R), (R, T), (R, P)]:
net.add_edge(a, b)
if only_play:
CLASSES = CLASSES[1:]
if categorical:
for col in [T, W]:
data[col] = data[col].apply(lambda x: 'Medium' if x == 1 else 'High' if x == 2 else 'Low')
for col in [P]:
data[col] = data[col].apply(lambda x: 'High' if x == 1 else 'Low')
for col in [R, Outcome]:
data[col] = data[col].apply(lambda x: 'Yes' if x == 1 else 'No')
return data, FEATURES, CLASSES, net
def load_play_nn(only_play=False):
"""
Play : 6 samples, 2 features, 1 classification
"""
FEATURES = ['t', 'w']
CLASSES = ['o']
data = pd.DataFrame(
[
#T,W P
[0, 0, 0],
[1, 0, 1],
[2, 0, 1],
[0, 1, 0],
[1, 1, 0],
[2, 1, 1],
],
columns=FEATURES + CLASSES)
if only_play:
CLASSES = CLASSES[1:]
return data, data[FEATURES], data[CLASSES]
def load_play_naive(only_play=False):
"""
Play : 16 samples, 4 features, 1 classification
"""
FEATURES = ['Temperature', 'Humidity', 'Windy', 'Rainy']
CLASSES = ['Play Golf']
data = pd.DataFrame(
[
#T,H,W,R
[0, 0, 0, 0, 1],
[0, 0, 1, 0, 1],
[0, 1, 0, 0, 0],
[0, 1, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 1, 0, 1],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 1],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 1],
[1, 0, 0, 1, 0],
[1, 0, 1, 1, 0],
[1, 1, 0, 1, 0],
[1, 1, 1, 1, 1],
],
columns=FEATURES + CLASSES)
if only_play:
CLASSES = CLASSES[1:]
return data, data[FEATURES], data[CLASSES]
# %%
def load_emotions(main_data_path, folder='emotions', nb_bins=False, strategy='quantile'):
""""
____ _ _ ____ ___ _ ____ _ _ ____
|___ |\/| | | | | | | |\ | [__
|___ | | |__| | | |__| | \| ___]
From UCO ML Repo: http://www.uco.es/kdis/mllresources/
72 features (numeric) and 6 labels
nb_bins: int
Number of bins for all the features
strategy: str
bins strategy, see emals.mlutils.bucketize_into_bins
"""
assert nb_bins is False or (isinstance(nb_bins, int) and nb_bins >= 2)
raw_data = loadarff(os.path.join(main_data_path, folder, 'emotions.arff'))
data = pd.DataFrame(raw_data[0])  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
This code is taken from Tensorflow tutorials. I added in some cross feature columns to improve the
accuracy to around 80%. This is my first look at using Estimators.
Link to tutorial: https://www.tensorflow.org/tutorials/estimator/linear
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import clear_output
from six.moves import urllib
import tensorflow as tf
import os
import sys
from sklearn.metrics import roc_curve
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')  # api: pandas.read_csv
import logging
from typing import NamedTuple, List, Dict, Sequence, Any
import numpy as np
import pandas as pd
from catpy.util import add_deprecated_gets, set_request_bool
from .base import CatmaidClientApplication
logger = logging.getLogger(__name__)
def lol_to_df(data, columns, dtypes=None):
if not dtypes:
return pd.DataFrame(data, columns=columns)
col_data = {s: [] for s in columns}
for row in data:
for col, item in zip(columns, row):
col_data[col].append(item)
try:
col_dtype = zip(columns, dtypes)
except TypeError:
col_dtype = zip(columns, (dtypes for _ in columns))
col_data = {col: pd.array(col_data[col], dtype=dtype) for col, dtype in col_dtype}
return pd.DataFrame(col_data)
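# Example (sketch): build a frame with per-column dtypes; the literal values
# below are illustrative only.
#   lol_to_df([[1, 'a'], [2, 'b']], ['id', 'name'], [np.uint64, object])
# yields an 'id' column with an unsigned integer dtype and an object-dtype
# 'name' column.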
def interpolate_node_locations(parent_xyz, child_xyz, z_res=1, z_offset=0):
parent_child = np.array([parent_xyz, child_xyz])
z_slices = np.round((parent_child[:, 2] - z_offset) / z_res).astype(int)
n_slices_between = abs(np.diff(z_slices)[0]) - 1
if n_slices_between < 1:
return
parent_child[:, 2] = (z_slices * z_res + z_offset).astype(float)
new_xyz = zip(
np.linspace(*parent_child[:, 0], num=n_slices_between + 2),
np.linspace(*parent_child[:, 1], num=n_slices_between + 2),
np.linspace(*parent_child[:, 2], num=n_slices_between + 2),
)
next(new_xyz) # discard, it's the parent location
for _ in range(n_slices_between):
yield next(new_xyz)
assert next(new_xyz)
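# Example (sketch): interpolating between a parent at z=0 and a child at z=40
# with z_res=10 yields the three intermediate slice locations, roughly
#   list(interpolate_node_locations((0, 0, 0), (0, 0, 40), z_res=10))
#   -> [(0.0, 0.0, 10.0), (0.0, 0.0, 20.0), (0.0, 0.0, 30.0)]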
def in_roi(roi_xyz, coords_xyz):
"""Closed interval: use intersection_roi"""
x, y, z = coords_xyz
return all(
[
roi_xyz[0, 0] <= x <= roi_xyz[1, 0],
roi_xyz[0, 1] <= y <= roi_xyz[1, 1],
roi_xyz[0, 2] <= z <= roi_xyz[1, 2],
]
)
def treenode_df(response: List[List[Any]]) -> pd.DataFrame:
return lol_to_df(
response,
["id", "parent", "user", "x", "y", "z", "radius", "confidence"],
[
np.uint64,
pd.UInt64Dtype()  # api: pandas.UInt64Dtype
'''
Author: <NAME>
Utilities to summarize the outputs produced by the model, i.e. the results.txt files written by the evaluation scripts.
'''
import os
from sys import path
import re
import pandas as pd
import numpy as np
from scipy.stats import ttest_rel
# res will need to be passed to the last function, as an example:
#output_path = '/scratch/geeticka/relation-extraction/output/semeval2010/CrossValidation'
#def res(path): return os.path.join(output_path, path)
## Below are the methods to gather necessary information from the results file
def read_confusion_matrix_per_line(cur_line):
if re.search(r'.*\|.*', cur_line): # only get those lines which have a pipe operator
splitted_line = cur_line.strip().split()
pipe_seen = 0 # the correct numbers are between two pipes
confusion_matrix_line = []
for val in splitted_line:
if val.startswith('|'):
pipe_seen += 1
if pipe_seen == 1 and val.strip() != '|': # keep collecting the values as you are
val = [x for x in val if x != '|'] # to handle some special cases when the pipe operator
# is stuck to the number (happens when the number is too long)
val = ''.join(val)
confusion_matrix_line.append(float(val))
return confusion_matrix_line
return None
def read_accuracy_per_line(cur_line):
if cur_line.startswith('Accuracy (calculated'):
accuracy = re.match(r'.*= (.*)%', cur_line).groups()[0]
accuracy = float(accuracy)
return accuracy
return None
def read_precision_recall_f1(cur_line):  # assumes we are already past the 'Results for the individual relations:' line
match = re.match(r'.*= (.*)%.*= (.*)%.*= (.*)%$', cur_line)
if match:
precision, recall, f1 = match.groups()
return float(precision), float(recall), float(f1)
else:
return None
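# Example (sketch): for a scorer line of the form
#   "P = 61.54% R = 66.67% F1 = 64.00%"
# read_precision_recall_f1 returns (61.54, 66.67, 64.0); lines that do not
# match the pattern return None.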
#if not cur_line.startswith('Micro-averaged result'): # you want to read only up to the point when the relations
# will need to double check above
# confusion_matrix_portion selects which part of the results file to read,
# e.g. '<<< (9+1)-WAY EVALUATION TAKING DIRECTIONALITY INTO ACCOUNT -- OFFICIAL >>>' for SemEval
def get_file_metrics(num_relations, result_file, confusion_matrix_portion):
official_portion_file = False
individual_relations_f1_portion = False
micro_f1_portion = False
macro_f1_portion = False
confusion_matrix_official = [] # stores the official confusion matrix read from the file
accuracy = None
metrics_indiv_relations = [] # precision, recall and f1 for each relation
metrics_micro = [] # excluding the other relation
metrics_macro = [] # excluding the other relation
with open(result_file, 'r') as result_file:
for cur_line in result_file:
cur_line = cur_line.strip()
#if cur_line.startswith('<<< (9+1)-WAY EVALUATION TAKING DIRECTIONALITY INTO ACCOUNT -- OFFICIAL >>>'):
if official_portion_file is True and cur_line.startswith('<<<'):
break
if cur_line.startswith(confusion_matrix_portion):
official_portion_file = True
if official_portion_file is False:
continue
confusion_matrix_line = read_confusion_matrix_per_line(cur_line)
if confusion_matrix_line is not None:
confusion_matrix_official.append(confusion_matrix_line)
acc = read_accuracy_per_line(cur_line)
if acc is not None: accuracy = acc
# figure out which sub portion of the official portion we are in
if cur_line.startswith('Results for the individual relations:'):
individual_relations_f1_portion = True
elif cur_line.startswith('Micro-averaged result'):
micro_f1_portion = True
elif cur_line.startswith('MACRO-averaged result'):
macro_f1_portion = True
# populate the precision, recall and f1 for the correct respective lists
if individual_relations_f1_portion is True and micro_f1_portion is False:
vals = read_precision_recall_f1(cur_line)
if vals is not None: metrics_indiv_relations.append([vals[0], vals[1], vals[2]])
elif micro_f1_portion is True and macro_f1_portion is False:
vals = read_precision_recall_f1(cur_line)
if vals is not None: metrics_micro.append([vals[0], vals[1], vals[2]])
elif macro_f1_portion is True:
vals = read_precision_recall_f1(cur_line)
if vals is not None: metrics_macro.append([vals[0], vals[1], vals[2]])
return confusion_matrix_official, accuracy, metrics_indiv_relations, metrics_micro, metrics_macro
# Generate confusion matrix as a pandas dataframe
def get_confusion_matrix_as_df(confusion_matrix_official, relations_as_short_list):
index = pd.Index(relations_as_short_list, name='gold labels')
columns = pd.Index(relations_as_short_list, name='predicted')  # api: pandas.Index
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)  # api: pandas.util.testing.assert_almost_equal
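# A minimal, self-contained sketch of the same na_values/skiprows behaviour as
# test_custom_na_values above, written against plain pandas rather than the test
# class helpers (assumes a recent pandas release; the expected values are copied
# from the test, not re-derived).
from io import StringIO
import numpy as np
import pandas as pd

demo_data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
demo_df = pd.read_csv(StringIO(demo_data), na_values=['baz'], skiprows=[1])
demo_expected = pd.DataFrame([[1.0, np.nan, 3.0],
                              [np.nan, 5.0, np.nan],
                              [7.0, 8.0, np.nan]], columns=['A', 'B', 'C'])
pd.testing.assert_frame_equal(demo_df, demo_expected, check_dtype=False)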
import os
import pathlib
import gc
import argparse
import logging
import joblib
import numpy as np
import pandas as pd
import torch
import hydra
from hydra.utils import get_original_cwd
import mlflow
from util import TARGET, seed_everything, reduce_mem_usage
from model import *
def dump(acc, unc, name):
acc = reduce_mem_usage(acc)
unc = reduce_mem_usage(unc)
save_dir = pathlib.Path('../data/submissions')
if not save_dir.exists():
save_dir.mkdir(parents=True)
now = pd.Timestamp.now()
acc_name = f'{name}-acc-{now:%Y%m%d}-{now:%H%M%S}.joblib'
unc_name = f'{name}-unc-{now:%Y%m%d}-{now:%H%M%S}.joblib'
joblib.dump(acc, save_dir / acc_name, compress=True)
joblib.dump(unc, save_dir / unc_name, compress=True)
def predict(args, module, criterion, data_dict, weight, te, evaluation=False):
remove_last4w = 0 if evaluation else 1
testset = M5TestDataset(data_dict, weight, te, remove_last4w=remove_last4w)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers
)
device = next(iter(module.parameters())).device
data_id_list = []
y_pred_list = []
samples_list = []
for batch in test_loader:
with torch.no_grad():
module.eval()
data_id, (x1, y1, x2, te), _, _ = batch
data_id_list += data_id
x1, y1, x2, te = x1.to(device), y1.to(device), x2.to(device), te.to(device)
params = module((x1, y1, x2, te))
y_pred = criterion.predict(params)
y_pred_list.append(y_pred)
samples = criterion.sample(params, 1000)
samples_list.append(samples)
y_pred = torch.cat(y_pred_list, dim=0).cpu().numpy()
samples = torch.cat(samples_list, dim=1).cpu().numpy()
quantiles = [0.005, 0.025, 0.165, 0.25, 0.5, 0.75, 0.835, 0.975, 0.995]
pred_columns = [f'F{i+1}' for i in range(28)]
if args.each:
submission_accuracy = pd.DataFrame(y_pred, index=pd.Index(data_id_list, name='id'))
submission_accuracy.columns = pred_columns
submission_accuracy = submission_accuracy.reset_index()
if evaluation:
submission_accuracy['id'] = submission_accuracy['id'].str[:-10] + 'evaluation'
else:
submission_accuracy['id'] = submission_accuracy['id'].str[:-10] + 'validation'
q = np.quantile(samples, quantiles, axis=0)
q = q.transpose((1, 0, 2)) # quantile * id * date -> id * quantile * date
q = q.reshape(-1, 28)
multi_index = pd.MultiIndex.from_product([data_id_list, quantiles], names=['id', 'quantile'])
df_q = pd.DataFrame(q, index=multi_index)
df_q.columns = pred_columns
df_q = df_q.reset_index()
if evaluation:
df_q['id'] = df_q['id'].str[:-10] + df_q['quantile'].map('{:.3f}'.format) + '_evaluation'
else:
df_q['id'] = df_q['id'].str[:-10] + df_q['quantile'].map('{:.3f}'.format) + '_validation'
df_q.drop('quantile', axis=1, inplace=True)
submission_uncertainty = df_q
else:
sales_transformers = joblib.load('../data/05_preprocess/agg_item/sales_transformers.joblib')
pred_list = []
q_list = []
for i, data_id in enumerate(data_id_list):
scaler = sales_transformers[data_id]
inverse_pred = scaler.inverse_transform(y_pred[i, :].reshape(-1, 1)).reshape(1, 28)
inverse = scaler.inverse_transform(samples[:, i, :].reshape(-1, 1)).reshape(1000, 28)
df_pred = pd.DataFrame(inverse_pred)
df_pred.columns = pred_columns
if evaluation:
df_pred.insert(0, 'id', data_id[:-10] + 'evaluation')
else:
df_pred.insert(0, 'id', data_id[:-10] + 'validation')
pred_list.append(df_pred)
q = np.quantile(inverse, quantiles, axis=0)
df_q = pd.DataFrame(q, index=quantiles).reset_index()
df_q.columns = ['quantile'] + pred_columns
if evaluation:
id_name = data_id[:-10] + df_q['quantile'].map('{:.3f}'.format) + '_evaluation'
else:
id_name = data_id[:-10] + df_q['quantile'].map('{:.3f}'.format) + '_validation'
df_q.insert(0, 'id', id_name)
df_q.drop('quantile', axis=1, inplace=True)
q_list.append(df_q)
submission_accuracy = pd.concat(pred_list).reset_index(drop=True)
submission_uncertainty = pd.concat(q_list).reset_index(drop=True)
return submission_accuracy, submission_uncertainty
def train(args):
os.chdir(get_original_cwd())
run_name = 'each' if args.each else 'agg'
run_name += '_submit' if args.submit else '_cv'
logging.info('start ' + run_name)
seed_everything(args.seed)
if args.each:
v_sales_dict = joblib.load('../data/05_preprocess/each_item/v_sales_dict.joblib')
data_count = joblib.load('../data/05_preprocess/each_item/data_count.joblib')
dims = joblib.load('../data/05_preprocess/each_item/dims.joblib')
weight = joblib.load('../data/06_weight/weight_each.joblib')
te = joblib.load('../data/07_te/each_te.joblib')
else:
v_sales_dict = joblib.load('../data/05_preprocess/agg_item/v_sales_dict.joblib')
data_count = joblib.load('../data/05_preprocess/agg_item/data_count.joblib')
dims = joblib.load('../data/05_preprocess/agg_item/dims.joblib')
weight = joblib.load('../data/06_weight/weight_agg.joblib')
te = joblib.load('../data/07_te/agg_te.joblib')
v_sales = next(iter(v_sales_dict.values()))
drop_columns = ['sort_key', 'id', 'cat_id', 'd', 'release_date', 'date', 'weekday', 'year', 'week_of_month', 'holidy']
if not args.use_prices:
drop_columns += [
'release_ago', 'sell_price', 'diff_price',
'price_max', 'price_min', 'price_std', 'price_mean', 'price_trend', 'price_norm', 'diff_price_norm', 'price_nunique',
'dept_max', 'dept_min', 'dept_std', 'dept_mean', 'price_in_dept', 'mean_in_dept',
'cat_max', 'cat_min', 'cat_std', 'cat_mean', 'price_in_cat', 'mean_in_cat',
'price_in_month', 'price_in_year',
]
cat_columns = ['aggregation_level', 'item_id', 'dept_id', 'store_id', 'state_id', 'month', 'event_name_1', 'event_type_1', 'event_name_2', 'event_type_2', 'day_of_week']
features = [col for col in v_sales.columns if col not in drop_columns + [TARGET]]
is_cats = [col in cat_columns for col in features]
cat_dims = []
emb_dims = []
for col in features:
if col in cat_columns:
cat_dims.append(dims['cat_dims'][col])
emb_dims.append(dims['emb_dims'][col])
dims = pd.DataFrame({
'cat_dims': cat_dims,
'emb_dims': emb_dims
})
logging.info('data loaded')
if args.submit:
logging.info('train for submit')
# train model for submission
index = 1 if args.useval else 2
valid_term = 2
train_index = index if args.patience == 0 else (index+valid_term)
trainset = M5Dataset(
v_sales_dict, data_count, features, weight, te,
remove_last4w=train_index, min_data_4w=0, over_sample=args.over_sample
)
validset = M5ValidationDataset(trainset.data_dict, weight, te, remove_last4w=index, term=valid_term)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
worker_init_fn=get_worker_init_fn(args.seed)
)
valid_loader = torch.utils.data.DataLoader(
validset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers
)
model = M5MLPLSTMModel(is_cats, dims, n_hidden=args.n_hidden, dropout=args.dropout, use_te=args.use_te)
criterion = M5Distribution(dist=args.dist, df=args.df)
module = M5LightningModule(model, criterion, train_loader, valid_loader, None, args)
trainer = M5Trainer(args.experiment, run_name, args.max_epochs, args.min_epochs, args.patience, args.val_check)
trainer.fit(module)
trainer.logger.experiment.log_artifact(trainer.logger.run_id, trainer.checkpoint_callback.kth_best_model)
logging.info('predict')
module.load_state_dict(torch.load(trainer.checkpoint_callback.kth_best_model)['state_dict'])
# for reproducibility
dmp_filename = '../data/cuda_rng_state_each.dmp' if args.each else '../data/cuda_rng_state_agg.dmp'
torch.save(torch.cuda.get_rng_state(), dmp_filename)
trainer.logger.experiment.log_artifact(trainer.logger.run_id, dmp_filename)
val_acc, val_unc = predict(args, module, criterion, trainset.data_dict, weight, te, evaluation=False)
eva_acc, eva_unc = predict(args, module, criterion, trainset.data_dict, weight, te, evaluation=True)
submission_accuracy = pd.concat([val_acc, eva_acc])
submission_uncertainty = pd.concat([val_unc, eva_unc])  # api: pandas.concat
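# Hedged sketch of the quantile-reshaping step used in predict() above: samples of
# shape (n_samples, n_ids, 28) are reduced to one row per (id, quantile) pair.
# The ids and sample values below are invented purely for illustration.
import numpy as np
import pandas as pd

demo_quantiles = [0.025, 0.5, 0.975]
demo_ids = ['FOODS_1_001_CA_1', 'FOODS_1_002_CA_1']        # hypothetical ids
demo_samples = np.random.rand(1000, len(demo_ids), 28)     # n_samples x n_ids x 28 days

q = np.quantile(demo_samples, demo_quantiles, axis=0)      # quantile x id x day
q = q.transpose((1, 0, 2)).reshape(-1, 28)                 # -> (id * quantile) x day
idx = pd.MultiIndex.from_product([demo_ids, demo_quantiles], names=['id', 'quantile'])
demo_q = pd.DataFrame(q, index=idx, columns=[f'F{i+1}' for i in range(28)])
print(demo_q.round(3).head())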
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)  # api: pandas.DataFrame
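# The fixtures above assign prices via chained indexing (data['c1'][dts[0]] = 105),
# which may not update the frame under newer pandas copy-on-write behaviour.
# A hedged equivalent of the same fixture using .loc, shown only as an alternative sketch:
import pandas as pd

demo_dts = pd.date_range('2010-01-01', periods=3)
demo_data = pd.DataFrame(index=demo_dts, columns=['c1', 'c2'], data=100.0)
demo_data.loc[demo_dts[0], 'c1'] = 105
demo_data.loc[demo_dts[0], 'c2'] = 95
print(demo_data)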
import numpy as np
import pandas
import random
import re
import sys
from scipy.stats import pearsonr, spearmanr
# auxiliary functions
def buildSeriesByCategory(df, categories):
res = []
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
res.append(occ)
res_series = pandas.Series(res, index=categories)
return res_series
# convert dataframe to a format that can be used to generate a Likert plot
def convertToRLikertFormat(dataframe):
# get rows from dataframe
version_series = dataframe.loc["Versioning"]
manual_series = dataframe.loc["Manual-Job"]
retry_series = dataframe.loc["Job-Retry"]
allow_series = dataframe.loc["Job-Allow-Failure"]
overall_series = dataframe.loc["overall"]
# convert to R format and fill the dictionary
dict_of_columns = {}
dict_of_columns['Fuzzy Version'] = fillList(version_series)
dict_of_columns['Manual Execution'] = fillList(manual_series)
dict_of_columns['Retry Failure'] = fillList(retry_series)
dict_of_columns['Fake Success'] = fillList(allow_series)
dict_of_columns['Overall'] = fillList(overall_series)
# merge everything in one dataframe
result = pandas.DataFrame(dict_of_columns)
return result
def fillList(series):
list = []
for label, value in series.items():
if label != "reacted":
num = value
while num > 0:
list.append(label)
num = num - 1
return pandas.Series(list)
def printStatsByCategory(df, categories):
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
print(cat + ": " + str(occ))
def buildOverallResultSeries(resulting_series):
res_list = []
for label, serie in resulting_series.items():
sum = 0;
for value in serie:
sum += value
res_list.append(sum)
return res_list
def fillTwoLists(typ, cat, labels_string, cat_list, label_list):
labels = labels_string.split(",")
for label in labels:
if label != 'nan':
cat_list.append(cat)
label_list.append(typ + ":" + label.strip())
def computeProjectSet(list, splitRule, categories):
resulting_set = set()
base_link = "https://gitlab.com"
for name in list:
if splitRule != "":
splittedName = name.split(splitRule)
new_name = base_link
for token in splittedName:
if token in categories:
break
else:
token = re.sub(r"[ <>#%\"{}|^'`;\[\]/?:@&=+$,\.()\\\\]", my_replace, token)
new_name += "/" + token
resulting_set.add(new_name)
return resulting_set
def my_replace(match):
return "-"
def printOccurrences(antipatterns, tot_smells, tot_ana_projects, tot_ana_owners):
num_smells = antipatterns['ID'].shape[0]
percentage_num_smells = round(num_smells / tot_smells * 100, 1)
print("#smells: " + str(num_smells) + "(" + str(percentage_num_smells) + "%)")
num_smelly_projects = antipatterns['Project'].unique().shape[0]
percentage_num_smelly_projects = round(num_smelly_projects / tot_ana_projects * 100, 1)
print("#smelly-projects: " + str(num_smelly_projects) + "(" + str(percentage_num_smelly_projects) + "%)")
num_smelly_owners = antipatterns['Owner'].unique().shape[0]
percentage_num_smelly_owners = round(num_smelly_owners / tot_ana_owners * 100, 1)
print("#smelly-owners: " + str(num_smelly_owners) + "(" + str(percentage_num_smelly_owners) + "%)")
def printOccurrencesPerCluster(apdf, tot_smells, tot_ana_projects, tot_ana_owners, tot_ana_projects_versioning,
tot_ana_owners_versioning):
print("\n-> Versioning")
versioning = apdf.loc[apdf["Category"] == "Versioning"]
printOccurrences(versioning, tot_smells, tot_ana_projects_versioning, tot_ana_owners_versioning)
print("\n-> Job-Allow-Failure")
allow_failure = apdf.loc[apdf["Category"] == "Job-Allow-Failure"]
printOccurrences(allow_failure, tot_smells, tot_ana_projects, tot_ana_owners)
print("\n-> Job-Retry")
retry = apdf.loc[apdf["Category"] == "Job-Retry"]
printOccurrences(retry, tot_smells, tot_ana_projects, tot_ana_owners)
print("\n-> Manual-Job")
manual = apdf.loc[apdf["Category"] == "Manual-Job"]
printOccurrences(manual, tot_smells, tot_ana_projects, tot_ana_owners)
def printOccurrences2(df, tot):
num_smells = df['ID'].shape[0]
percentage_num_smells = round(num_smells / tot * 100, 1)
print("#smells: " + str(num_smells) + "(" + str(percentage_num_smells) + "%)")
def printOccurrencesPerCluster2(apdf, tot_versioning, tot_allow, tot_retry, tot_manual):
print("-> Versioning")
versioning = apdf.loc[apdf["Category"] == "Versioning"]
printOccurrences2(versioning, tot_versioning)
print("-> Job-Allow-Failure")
allow_failure = apdf.loc[apdf["Category"] == "Job-Allow-Failure"]
printOccurrences2(allow_failure, tot_allow)
print("-> Job-Retry")
retry = apdf.loc[apdf["Category"] == "Job-Retry"]
printOccurrences2(retry, tot_retry)
print("-> Manual-Job")
manual = apdf.loc[apdf["Category"] == "Manual-Job"]
printOccurrences2(manual, tot_manual)
def versioning_occurrences_byfile(smell):
tot_incidents = smell.shape[0]
affected_files = smell["Remote Configuration File Link"].unique().shape[0]
yml_incidents = smell.loc[smell["Configuration File Name"] == ".gitlab-ci.yml"]
tot_yml_incidents = yml_incidents.shape[0]
tot_affected_yml = yml_incidents["Remote Configuration File Link"].unique().shape[0]
req_incidents = smell.loc[smell["Configuration File Name"] == "requirements.txt"]
tot_req_incidents = req_incidents.shape[0]
tot_affected_req = req_incidents["Remote Configuration File Link"].unique().shape[0]
pom_incidents = smell.loc[(smell["Configuration File Name"] != ".gitlab-ci.yml") &
(smell["Configuration File Name"] != "requirements.txt")]
tot_pom_incidents = pom_incidents.shape[0]
tot_affected_pom = pom_incidents["Remote Configuration File Link"].unique().shape[0]
print("tot_incidents: " + str(tot_incidents))
print("affected_files: " + str(affected_files))
print("tot_yml_incidents: " + str(tot_yml_incidents) + "(" + str(
round(tot_yml_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_yml_files: " + str(tot_affected_yml))
print("tot_req_incidents: " + str(tot_req_incidents) + "(" + str(
round(tot_req_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_req_files: " + str(tot_affected_req))
print("tot_pom_incidents: " + str(tot_pom_incidents) + "(" + str(
round(tot_pom_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_pom_files: " + str(tot_affected_pom))
# RQ1 data analysis
def rqone_results(issueReportFile, path):
with open(issueReportFile, 'r', encoding="utf-8") as input_file:
frame = pandas.read_csv(input_file) # read file in a dataframe
tot_opened_issues = frame.loc[pandas.notna(frame["category"]) &
(frame["category"] != "Vulnerability") &
(frame["category"] != "Job-Retry (duplicate)")].shape[0]
print("### Tot opened issues: " + str(tot_opened_issues))
# select only active projects
df = frame.loc[(frame["commitsSinceIssue"] >= 1) &
(frame["category"] != "Vulnerability") &
pandas.notna(frame["category"]) &
(frame["category"] != "Job-Retry (duplicate)")]
categories = df.category.unique()
print("\n#### Opened issues (active projects since September 2019) ####")
openend_series = buildSeriesByCategory(df, categories)
printStatsByCategory(df, categories)
activeProjects = df.shape[0]
print("Total: " + str(activeProjects) + " (" +
str(tot_opened_issues - activeProjects) + " inactive)")
print("\n#### Issues (with a reaction) ####")
# select rows by column values
react_df = df.loc[(df["fixed (y/n/m)"] == "y") |
(df["numUpvotes"] > 0) |
(df["numDownvotes"] > 0) |
((pandas.notna(df["reaction"])) &
(df["reaction"] != "invalid")
) |
(df["isAssigned"]) |
(df["state"] == "closed")
]
react_series = buildSeriesByCategory(react_df, categories)
printStatsByCategory(react_df, categories)
tot_rissues = 0
for value in react_series:
tot_rissues += value
print("Total issues with a reaction: " + str(tot_rissues))
comments = react_df["numComments"]
tot_comments = 0
for value in comments:
tot_comments += value
print("Total num. of comments: " + str(tot_comments))
print("\n#### Ignored issues ####")
ignored_df = df.loc[(df["fixed (y/n/m)"] == "n") &
(df["state"] == "closed") &
(df["numUpvotes"] == 0) &
(df["numDownvotes"] == 0) &
((pandas.isna(df["reaction"])  # api: pandas.isna
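# Hedged, self-contained sketch of the boolean-mask filtering pattern used above
# for "reacted" vs "ignored" issues; the columns and rows are simplified stand-ins
# for the real issue-report dataframe.
import numpy as np
import pandas

demo = pandas.DataFrame({
    'state': ['closed', 'opened', 'closed'],
    'numUpvotes': [0, 2, 0],
    'reaction': [np.nan, 'thumbs up', 'invalid'],
})
reacted = demo.loc[(demo['numUpvotes'] > 0) |
                   (pandas.notna(demo['reaction']) & (demo['reaction'] != 'invalid'))]
ignored = demo.loc[(demo['state'] == 'closed') &
                   (demo['numUpvotes'] == 0) &
                   (pandas.isna(demo['reaction']) | (demo['reaction'] == 'invalid'))]
print(len(reacted), len(ignored))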
import os
import copy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
import geopandas as gpd
import matplotlib.colors as colors
from plotting.colors import load_color_palette
mpl.rcParams['pdf.fonttype'] = 42
LL_date = '210412'
idph_data_path = '/Volumes/fsmresfiles/PrevMed/Covid-19-Modeling/IDPH line list'
cleaned_line_list_fname = os.path.join(idph_data_path,
'LL_%s_JGcleaned_no_race.csv' % LL_date)
box_data_path = '/Users/jlg1657/Box/NU-malaria-team/data/covid_IDPH'
project_path = '/Users/jlg1657/Box/NU-malaria-team/projects/covid_chicago'
plot_path = os.path.join(project_path, 'Plots + Graphs', '_trend_tracking')
emr_fname = os.path.join(box_data_path, 'emresource_by_region.csv')
spec_coll_fname = os.path.join(box_data_path, 'Corona virus reports', '%s_LL_cases_by_EMS_spec_collection.csv' % LL_date)
shp_path = os.path.join(box_data_path, 'shapefiles')
def load_cleaned_line_list() :
df = pd.read_csv(cleaned_line_list_fname)
return df
def make_heatmap(ax, adf, col) :
palette = sns.color_palette('RdYlBu_r', 101)
df = adf.dropna(subset=[col])
df = df.groupby([col, 'EMS'])['id'].agg(len).reset_index()
df = df.rename(columns={'id' : col,
col : 'date'})
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(by=['EMS', 'date'])
ax.fill_between([np.min(df['date']), np.max(df['date']) + timedelta(days=1)],
[0.5, 0.5], [11.5, 11.5], linewidth=0, color=palette[0])
for ems, edf in df.groupby('EMS') :
max_in_col = np.max(edf[col])
print(ems, max_in_col)
for r, row in edf.iterrows() :
ax.fill_between([row['date'], row['date'] + timedelta(days=1)],
[ems-0.5, ems-0.5], [ems+0.5, ems+0.5],
color=palette[int(row[col]/max_in_col*100)],
linewidth=0)
ax.set_title(col)
ax.set_ylabel('EMS region')
formatter = mdates.DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
def heatmap() :
adf = load_cleaned_line_list()
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(left=0.05, right=0.97)
cols = ['specimen_collection', 'deceased_date']
for c, col in enumerate(cols) :
ax = fig.add_subplot(1,len(cols),c+1)
make_heatmap(ax, adf, col)
plt.savefig(os.path.join(plot_path, 'EMS_cases_deaths_heatmap_%sLL.png' % LL_date))
plt.show()
def aggregate_to_date_spec_collection() :
adf = load_cleaned_line_list()
col = 'specimen_collection'
df = adf.dropna(subset=[col])
df = df.groupby([col, 'EMS'])['id'].agg(len).reset_index()
df = df.rename(columns={'id' : col,
col : 'date'})
df = df.sort_values(by=['EMS', 'date'])
df.to_csv(spec_coll_fname, index=False)
def plot_EMS_by_line(colname) :
df = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_covidregion.csv' % LL_date))
df = df[df['covid_region'].isin(range(1,12))]
df['date'] = pd.to_datetime(df['date'])
# df = df[(df['date'] > date(2020, 2, 29)) & (df['date'] < date(2021, 1, 1))]
col = 'moving_ave'
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
fig = plt.figure(figsize=(11,6))
fig.subplots_adjust(left=0.07, right=0.97, bottom=0.05, top=0.95, hspace=0.3, wspace=0.25)
palette = sns.color_palette('Set1')
formatter = mdates.DateFormatter("%m-%d")
for e, (ems, edf) in enumerate(df.groupby('covid_region')) :
ax = fig.add_subplot(3,4,e+1)
edf['moving_ave'] = edf[colname].rolling(window=7, center=False).mean()
max_in_col = np.max(edf[col])
ax.plot(edf['date'], edf[col], color=palette[0], label=ems)
ax.fill_between(edf['date'].values, [0]*len(edf[col]), edf[col],
color=palette[0], linewidth=0, alpha=0.3)
ax.set_title('region %d' % ems)
ax.set_ylim(0, max_in_col*1.05)
ax.set_xlim(date(2020,3,10), np.max(df['date']))
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
if e%4 == 0 :
ax.set_ylabel(colname)
fig.suptitle(colname)
plt.savefig(os.path.join(plot_path, 'covid_region_%s_%sLL.png' % (colname, LL_date)))
# plt.savefig(os.path.join(plot_path, 'covid_region_%s_%sLL_2020.pdf' % (colname, LL_date)), format='PDF')
def format_ax(ax, name) :
ax.set_title(name)
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def plot_ratio_ems() :
def get_ratio(adf, ems, w):
edf = adf[adf['EMS'] == ems]
col = 'specimen_collection'
d = edf[col].values
if w == 0:
recent = np.mean(d[-7:])
else:
recent = np.mean(d[-7 * (w + 1):-7 * w])
back = np.mean(d[-7 * (w + 2):-7 * (w + 1)])
return recent / back
df = pd.read_csv(spec_coll_fname)
df['date'] = pd.to_datetime(df['date'])
max_date = date(2020, 7, 8)
df = df[df['date'] <= max_date]
ems_shp = gpd.read_file(os.path.join(shp_path, 'EMS_Regions', 'EMS_Regions.shp'))
ems_shp['REGION'] = ems_shp['REGION'].astype(int)
fig = plt.figure(figsize=(12, 10))
fig.subplots_adjust(top=0.95)
vmin, vmax = 0.4, 3
norm = MidpointNormalize(vmin=vmin, vcenter=1, vmax=vmax)
for week in range(6) :
ax = fig.add_subplot(2,3,6-week)
format_ax(ax, '%d weeks ago vs %d weeks ago' % (week, week+1))
ems_shp['ratio'] = ems_shp['REGION'].apply(lambda x : get_ratio(df, x, week))
ems_shp.plot(column='ratio', ax=ax, cmap='RdYlBu_r', edgecolor='0.8',
linewidth=0.8, legend=False, norm=norm)
sm = plt.cm.ScalarMappable(cmap='RdYlBu_r', norm=norm)
sm._A = []
cbar = fig.colorbar(sm, ax=ax)
fig.suptitle('week over week ratio of cases by specimen collection date\nLL data ending ' + str(max_date))
plt.savefig(os.path.join(plot_path, 'EMS_weekly_case_ratio_%sLL.png' % LL_date))
def load_county_map_with_public_data() :
from public_idph_data import load_county_cases
df = load_county_cases()
county_shp = gpd.read_file(os.path.join(shp_path, 'IL_BNDY_County', 'IL_BNDY_County_Py.shp'))
cols = ['Positive_Cases', 'Deaths', 'Tested']
sdf = df[df['County'].isin(['Cook', 'Chicago'])]
sdf = sdf.groupby('update_date')[cols].agg(np.sum).reset_index()
sdf['County'] = 'Cook'
df = df[~df['County'].isin(['Cook', 'Chicago'])]
df = pd.concat([df, sdf], sort=True)
df['County'] = df['County'].apply(lambda x : x.upper())
df.loc[df['County'] == 'DE WITT', 'County'] = 'DEWITT'
# ds_shp = pd.merge(left=county_shp, right=df, left_on='COUNTY_NAM', right_on='County')
df = df.sort_values(by=['County', 'update_date'])
return county_shp, df
def plot_ratio_county() :
ds_shp, df = load_county_map_with_public_data()
max_date = np.max(df['update_date'])
fig = plt.figure(figsize=(12, 10))
fig.subplots_adjust(top=0.95)
vmin, vmax = 0, 3
norm = MidpointNormalize(vmin=vmin, vcenter=1, vmax=vmax)
def get_ratio(adf, county, w):
cdf = adf[adf['County'] == county.upper()]
# if len(cdf) == 0 :
# return 100
cdf['daily_pos'] = np.insert(np.diff(cdf['Positive_Cases']), 0, 0)
d = cdf['daily_pos'].values
if w == 0:
recent = np.mean(d[-7:])
else:
recent = np.mean(d[-7 * (w + 1):-7 * w])
back = np.mean(d[-7 * (w + 2):-7 * (w + 1)])
if back == 0 and recent == 0 :
return -1
if back == 0 :
return vmax
return min([recent / back, vmax])
for week in range(6) :
ax = fig.add_subplot(2,3,6-week)
format_ax(ax, '%d weeks ago vs %d weeks ago' % (week, week+1))
ds_shp['ratio'] = ds_shp['COUNTY_NAM'].apply(lambda x : get_ratio(df, x, week))
ds_shp.plot(ax=ax, color='#969696', edgecolor='0.8',
linewidth=0.8, legend=False)
pdf = ds_shp[ds_shp['ratio'] == -1]
pdf.plot(ax=ax, color='#313695', edgecolor='0.8',
linewidth=0.8, legend=False)
pdf = ds_shp[ds_shp['ratio'] >= 0]
pdf.plot(column='ratio', ax=ax, cmap='RdYlBu_r', edgecolor='0.8',
linewidth=0.8, legend=False, norm=norm)
sm = plt.cm.ScalarMappable(cmap='RdYlBu_r', norm=norm)
sm._A = []
cbar = fig.colorbar(sm, ax=ax)
fig.suptitle('week over week ratio of cases\npublic data ending ' + str(max_date))
plt.savefig(os.path.join(plot_path, 'county_weekly_case_ratio.png'))
def plot_LL_all_IL() :
df = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_covidregion.csv' % LL_date))
df = df.groupby('date')[['cases', 'deaths', 'admissions']].agg(np.sum).reset_index()
df['date'] = pd.to_datetime(df['date'])  # api: pandas.to_datetime
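# Hedged sketch of the aggregation pattern used by the plotting functions above:
# daily counts summed across regions and smoothed with a 7-day rolling mean.
# The dates and case counts are random placeholders, not real data.
import numpy as np
import pandas as pd

demo = pd.DataFrame({
    'date': pd.date_range('2020-03-01', periods=14).repeat(2),
    'covid_region': [1, 2] * 14,
    'cases': np.random.randint(0, 50, 28),
})
daily = demo.groupby('date')[['cases']].sum().reset_index()
daily['moving_ave'] = daily['cases'].rolling(window=7, center=False).mean()
print(daily.tail())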
import requests
import io
import pandas as pd
def earthdata_granule_search(product, version, bounding_box, timestampsUTC, search_time_window=1, search_time_window_units='h', list_filepath=None, ext=None):
'''
Returns a list of data access URLs from the NASA EarthData API for a specific product, version, time window, and location window.
The URLs can then be used for downloading with wget.
(<NAME>, <EMAIL>, 2020)
Parameters:
product (str): product id
version (str): product version
bounding_box (list of ints): [lower left longitude, lower left latitude, upper right longitude, upper right latitude]
timestampsUTC (list of pd.Timestamp or datetime): time and date in UTC to search for data product
search_time_window (int): how far out from each timestampsUTC to search for data products (for pd.timedelta())
search_time_window_units (str): time units of search_time_window, e.g. 'h' for hours or 'd' for days (for pd.to_timedelta())
list_filepath (str): (optional) filepath for a text file to save a list of the access URLs to
ext (str): (optional) file extension to search for (e.g. "tif" or "hdf")
Returns:
data_url_list (list): list of 'Online Access URLs' (strings) for each search result
'''
# set the base URL for searching granules through the EarthData API
url = "https://cmr.earthdata.nasa.gov/search/granules.csv?"
# compose search strings for data product and version
shortname_id_str = "short_name={}".format(product)
version_str = "version={}".format(version)
# compose search string for bounding box
bounding_box_str = "bounding_box[]={ll_lon},{ll_lat},{ur_lon},{ur_lat}".format(ll_lon=bounding_box[0],
ll_lat=bounding_box[1],
ur_lon=bounding_box[2],
ur_lat=bounding_box[3])
# if timestamp provided is not an iterable (such as if it is a single timestamp or datetime object), try to make it a list
if type(timestampsUTC) != list:
timestampsUTC = [timestampsUTC]
# create an empty list to hold download URLs
data_url_list = []
# for each timestamp, use it as the start date for a search
for i, start_date in enumerate(timestampsUTC):
# make sure we have a pd.Timestamp, this will convert datetime.datetime to pd.Timestamp
start_date = pd.Timestamp(start_date)
# make the end date for the search by incrementing with search_time_window
end_date = start_date + pd.to_timedelta(search_time_window, unit=search_time_window_units)
# compose the time range string for the request
start_date_str = "{}:00:00Z".format(start_date.strftime('%Y-%m-%dT%H'))
end_date_str = "{}:00:00Z".format(end_date.strftime('%Y-%m-%dT%H'))
time_range_str = "temporal=" + start_date_str + "," + end_date_str
# build the whole request URL and make the request.get
response = requests.get(url+"&"+shortname_id_str+"&"+version_str+"&"+bounding_box_str+"&"+time_range_str)
# read the response CSV and put in a temporary dataframe
df = pd.read_csv(io.StringIO(response.text))
# add the access url from which we can download the file to our list
for j, data_url in enumerate(df['Online Access URLs']):
if pd.isnull(data_url):  # api: pandas.isnull
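# Hedged usage sketch for the earthdata_granule_search helper defined above.
# The product id, version, bounding box, timestamps and output filename below are
# placeholders, not verified values for any particular dataset.
import pandas as pd

demo_timestamps = [pd.Timestamp('2020-03-01 18:00'), pd.Timestamp('2020-03-02 18:00')]
demo_urls = earthdata_granule_search(product='MOD10A1',        # hypothetical product id
                                     version='6',              # hypothetical version
                                     bounding_box=[-122.0, 47.0, -121.0, 48.0],
                                     timestampsUTC=demo_timestamps,
                                     search_time_window=1,
                                     search_time_window_units='d',
                                     list_filepath='granule_urls.txt')
print(len(demo_urls), 'access URLs found')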
from flask import Flask, redirect, url_for, render_template, request, session, jsonify, flash
from werkzeug.utils import secure_filename
from Bio import Entrez
import itertools
import os
import json
import pandas as pd
import secrets
import time
from helperfun import *
# change ddgcalc_os to ddgcalc_win when using windows
# from ddgcalc_win import ddgcalcs
from ddgcalc_os import ddgcalcs
app = Flask(__name__)
# MAX 30 mb
app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 30
app.config['UPLOAD_FOLDER'] = 'uploads/'
app.config['SECRET_KEY'] = "temporarysecretkey"
app.config['SESSION_PERMANENT'] = True
database = "snp-db/snpDB_v5.pkl"
@app.route("/")
@app.route("/home")
def index():
uploadfiles = [os.path.join(app.config['UPLOAD_FOLDER'], filename) for filename in os.listdir( app.config['UPLOAD_FOLDER'])]
tmpfiles = [os.path.join('prediction/tmp/', filename) for filename in os.listdir('prediction/tmp/')]
deletefiles(uploadfiles)
deletefiles(tmpfiles)
return render_template("index.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/endsession")
def endsession():
if session.get("file") != None:
if session["file"] != "N/A":
os.remove(app.config['UPLOAD_FOLDER']+session["file"]+'.pkl')
os.remove(app.config['UPLOAD_FOLDER']+session["file"]+'unfiltered.pkl')
[session.pop(key) for key in list(session.keys())]
flash('Thanks for using COMET. All session data and files have been deleted.')
else:
[session.pop(key) for key in list(session.keys())]
flash('Thanks for using COMET. All session data has been deleted.')
else:
flash('Nothing to be deleted!')
return redirect(url_for('index'))
@app.route("/input", methods=["POST", "GET"])
def input():
if request.method == "POST":
if 'file' in request.files:
file = request.files['file']
if allowed_file(file.filename):
filename = secure_filename(file.filename)
filename = filename[:-4] + secrets.token_urlsafe(16)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
session["file"] = filename
rs_list = []
for line in open(app.config['UPLOAD_FOLDER']+filename):
if '#' not in line and 'i' not in line and "rsid" not in line:
# 23andMe
if 'rs' in line and len(re.split(r'\t', line.rstrip('\n'))) == 4:
temp = re.split(r'\t', line.rstrip('\n'))
if temp[3] != "--":
rs_list.append(temp)
# Ancestry
elif 'rs' in line and len(re.split(r'\t', line.rstrip('\n'))) == 5:
temp = re.split(r'\t', line.rstrip('\n'))
if temp[3] != "0" and temp[3] != "I" and temp[3] != "D":
temp[3] = temp[3] + temp[4]
del temp[-1]
rs_list.append(temp)
# MyHeritage
elif 'rs' in line and len(re.split(',', line.rstrip('\n'))) == 4:
temp = re.split('","', line.rstrip('\n'))
if temp[3][:-1] != "--":
temp[0] = temp[0][1:]
temp[3] = temp[3][:-1]
rs_list.append(temp)
if len(rs_list) > 0:
rs_df = pd.DataFrame(rs_list,columns=['rsid','chr','pos','geno'])
rs_df = rs_df.replace({'chr': {"23": "X", "24": "Y", "25": "Y", "26": "MT"}})
db = pd.read_pickle(database)
db = db.drop(['chr', 'pos'], axis=1)
rs_df = rs_df.join(db.set_index('rsid'), on='rsid')
rs_df.to_pickle(app.config['UPLOAD_FOLDER']+filename+"unfiltered.pkl")
rs_df = rs_df.dropna()
rs_df = rs_df.reset_index(drop=True)
os.remove(app.config['UPLOAD_FOLDER']+filename)
rs_df.to_pickle(app.config['UPLOAD_FOLDER']+filename+".pkl")
return redirect(url_for("select"))
else:
os.remove(app.config['UPLOAD_FOLDER']+filename)
return render_template("input.html", error = "Error: File could not be parsed. Please check that this is a raw genotype file.")
else:
return render_template("input.html", error = "Error: Incorrect file type. Please upload a .txt file.")
else:
return render_template("input.html")
@app.route("/select", methods=["POST", "GET"])
def select():
if request.method == "POST":
if 'turnoff' in request.form:
session["turnoffdb"] = True
if 'turnon' in request.form:
session.pop("turnoffdb")
if 'chrom' in request.form:
session["chrom"] = request.form['chrom']
if "file" in session and session["file"] != "N/A":
if "turnoffdb" in session:
return render_template("select.html", list=list, geno=True, dboff = 1)
return render_template("select.html", list=list, geno=True, dboff = 0)
else:
session["file"] = "N/A"
return render_template("select.html", list=list, geno=False, dboff = -1)
@app.route('/showtable')
def index_get_data():
chrom = session["chrom"] if session.get("chrom") != None else str(1)
if session["file"] != "N/A":
filename = session["file"]
if "turnoffdb" in session:
rs_df = pd.read_pickle(app.config['UPLOAD_FOLDER']+filename+"unfiltered.pkl")
cols = ['rsid', 'chromosome', 'position', 'genotype', 'substitution', 'gene','freq']
else:
rs_df = pd.read_pickle(app.config['UPLOAD_FOLDER']+filename+".pkl")  # api: pandas.read_pickle
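# Hedged sketch of the genotype/SNP-database join performed in the upload route
# above (rs_df.join(db.set_index('rsid'), on='rsid')); the rows and columns here
# are invented and far smaller than the real pickled database.
import pandas as pd

demo_rs = pd.DataFrame({'rsid': ['rs123', 'rs456'],
                        'chr': ['1', '2'],
                        'pos': ['1001', '2002'],
                        'geno': ['AA', 'CT']})
demo_db = pd.DataFrame({'rsid': ['rs123'],
                        'gene': ['GENE1'],
                        'substitution': ['A>G']})
annotated = demo_rs.join(demo_db.set_index('rsid'), on='rsid')   # left join on rsid
filtered = annotated.dropna().reset_index(drop=True)             # keep SNPs present in the DB
print(filtered)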
"""
Author: <NAME>
Copyright: <NAME>, RCH Engineering, 2021
License: BSD Clear 3 Clause License
All rights reserved
"""
import datetime
import os
import warnings
import affine
import geopandas as gpd
import h5py
import netCDF4 as nc
import numpy as np
import pandas as pd
import rasterio.features as riof
import requests
import xarray as xr
from pydap.cas.urs import setup_session
try:
import pygrib
except ImportError:
pygrib = None
from ._coords import _map_coords_to_slice
from ._utils import _assign_eng
from ._utils import _guess_time_var
from ._utils import _array_by_eng
from ._utils import _array_to_stat_list
from ._utils import _attr_by_eng
from ._utils import _delta_to_time
from ._utils import _gen_stat_list
from ._consts import ALL_ENGINES
from ._consts import SPATIAL_X_VARS
from ._consts import SPATIAL_Y_VARS
__all__ = ['TimeSeries', ]
class TimeSeries:
"""
Creates a time series of values from arrays contained in netCDF, grib, hdf, or geotiff formats. Values in the
series are extracted by specifying coordinates of a point, range of coordinates, a spatial data file, or computing
statistics for the entire array.
Args:
files (list): A list (even if len==1) of either absolute file paths to netcdf, grib, hdf5, or geotiff files or
urls to an OPeNDAP service (but beware the data transfer speed bottleneck)
var (str or int or list or tuple): The name of the variable(s) to query as they are stored in the file
(e.g. often 'temp' or 'T' instead of Temperature) or the band number if you are using grib files *and* you
specify the engine as pygrib. If the var is contained in a group, include the group name as a unix style
path e.g. 'group_name/var'.
dim_order (tuple): A tuple of the names of the dimensions/coordinate variables for all of the variables listed
in the "var" parameter, listed in order. If the coordinate variables are contained in a group, include the
group name as a unix style path e.g. 'group_name/coord_var'.
Keyword Args:
t_var (str): Name of the time dimension/coordinate variable if it is used in the data. Grids will attempt to
guess if it is not "time" and you do not specify one with this argument.
x_var (str): Name of the x dimension/coordinate variable if it is used in the data. Grids will attempt to
guess if it is not specified and not a standard name.
y_var (str): Name of the y dimension/coordinate variable if it is used in the data. Grids will attempt to
guess if it is not specified and not a standard name.
engine (str): the python package used to power the file reading. Defaults to best for the type of input data.
The options include 'xarray', 'opendap', 'auth-opendap', 'netcdf4', 'cfgrib', 'pygrib', 'h5py', 'rasterio'
xr_kwargs (dict): A dictionary of kwargs that you might need when opening complex grib files with xarray
user (str): a username used for authenticating remote datasets, if required by your remote data source
pswd (str): a password used for authenticating remote datasets, if required by your remote data source
session (requests.Session): a requests Session object preloaded with credentials/tokens for authentication
stats (str or tuple): How to reduce arrays of values to a single value in the series dataframe.
Provide a list of strings (e.g. ['mean', 'max']), or a comma separated string (e.g. 'mean,max,min').
Options include: mean, median, max, min, sum, std, or a percentile (e.g. '25%').
Or, provide 'all' which is interpreted as (mean, median, max, min, sum, std, ).
Or, provide 'box' or 'boxplot' which is interpreted as (max, 75%, median, mean, 25%, min, ).
Or, provide 'values' to get a flat list of all non-null values in the query so you can compute other stats.
fill_value (int): The value used for filling no_data/null values in the variable's array. Default: -9999.0
interp_units (bool): If your data conforms to the CF NetCDF standard for time data, choose True to
convert the values in the time variable to datetime strings in the pandas output. The units string for the
time variable of each file is checked separately unless you specify it in the unit_str parameter.
origin_format (str): A datetime.strptime string for extracting the origin time from the units string.
unit_str (str): a CF Standard conforming string indicating how the spacing and origin of the time values.
This is helpful if your files do not contain a units string. Only specify this if ALL files that you query
use the same units string. Usually this looks like "step_size since YYYY-MM-DD HH:MM:SS" such as
"days since 2000-01-01 00:00:00".
strp_filename (str): A datetime.strptime string for extracting datetimes from patterns in file names. Only for
datasets which contain 1 time step per file.
Methods:
point: Extracts a time series of values at a point for a given coordinate pair
multipoint: Extracts a time series of values for several points given a series of coordinate values
bound: Extracts a time series of values with a bounding box for each requested statistic
range: Alias for TimeSeries.bound()
shape: Extracts a time series of values on a line or within a polygon for each requested statistic
"""
# core parameters from user
files: list
var: tuple
dim_order: tuple
# handling non-standard dimension names or organizations
t_var: str
x_var: str
y_var: str
# help opening data
engine: str
xr_kwargs: dict
user: str
pswd: str
session: requests.session
# reducing arrays to numbers
stats: str or list or tuple or np.ndarray
fill_value: int or float or bool
# how to handle the time data
interp_units: bool
origin_format: str
unit_str: str
strp_filename: str
t_index: int # derived from dim_order and t_var
t_var_in_dims: bool # derived from dim_order and t_var
def __init__(self, files: list, var: str or int or list or tuple, dim_order: tuple, **kwargs):
# parameters configuring how the data is interpreted
self.files = (files,) if isinstance(files, str) else files
self.variables = (var,) if isinstance(var, str) else var
assert len(self.variables) >= 1, 'specify at least 1 variable'
self.dim_order = dim_order
# optional parameters describing how to access the data
self.engine = kwargs.get('engine', _assign_eng(files[0]))
assert self.engine in ALL_ENGINES, f'engine "{self.engine}" not recognized'
self.xr_kwargs = kwargs.get('xr_kwargs', None)
# optional parameters modifying how dimension/coordinate variable names are interpreted
self.t_var = kwargs.get('t_var', None)
self.x_var = kwargs.get('x_var', None)
self.y_var = kwargs.get('y_var', None)
if self.x_var is None:
for a in self.dim_order:
if a in SPATIAL_X_VARS:
self.x_var = a
if self.y_var is None:
for a in self.dim_order:
if a in SPATIAL_Y_VARS:
self.y_var = a
if self.t_var is None:
self.t_var = _guess_time_var(dim_order)
self.t_var_in_dims = self.t_var in self.dim_order
self.t_index = self.dim_order.index(self.t_var) if self.t_var_in_dims else False
# additional options describing how the time data should be interpreted
self.interp_units = kwargs.get('interp_units', False)
self.strp_filename = kwargs.get('strp_filename', False)
self.unit_str = kwargs.get('unit_str', None)
self.origin_format = kwargs.get('origin_format', '%Y-%m-%d %X')
# optional parameter modifying which statistics to process
self.stats = _gen_stat_list(kwargs.get('stats', ('mean',)))
self.fill_value = kwargs.get('fill_value', -9999.0)
# optional authentication for remote datasets
self.user = kwargs.get('user', None)
self.pswd = kwargs.get('pswd', None)
self.session = kwargs.get('session', False)
if 'opendap' in self.engine and not self.session and self.user is not None and self.pswd is not None:
a = requests.Session()
a.auth = (self.user, self.pswd)
self.session = a
# validate that some parameters are compatible
if self.engine == 'rasterio':
assert isinstance(self.variables, int), 'GeoTIFF variables must be integer band numbers'
if not self.dim_order == ('y', 'x'):
warnings.warn('For GeoTIFFs, the correct dim order is ("y", "x")')
self.dim_order = ('y', 'x')
elif self.engine == 'pygrib':
if pygrib is None:
raise ModuleNotFoundError('pygrib engine only available if optional pygrib dependency is installed')
assert isinstance(self.variables, int), 'pygrib engine variables must be integer band numbers'
def __bool__(self):
return True
def __str__(self):
string = 'grids.TimeSeries'
for p in vars(self):
if p == 'files':
string += f'\n\t{p}: {len(self.__getattribute__(p))}'
else:
string += f'\n\t{p}: {self.__getattribute__(p)}'
return string
def __repr__(self):
return self.__str__()
def point(self,
*coords: int or float or None, ) -> pd.DataFrame:
"""
Extracts a time series at a point for a given series of coordinate values
Args:
coords (int or float or None): provide a coordinate value (integer or float) for each dimension of the
array which you are creating a time series for. You need to provide exactly the same number of
coordinates as there are dimensions
Returns:
pandas.DataFrame with an index, a column named datetime, and a column named values.
"""
assert len(self.dim_order) == len(coords), 'Specify 1 coordinate for each dimension of the array'
# make the return item
results = dict(datetime=[])
for var in self.variables:
results[var] = []
# map coordinates -> cell indices -> python slice() objects
slices = self._gen_dim_slices(coords, 'point')
# iterate over each file extracting the value and time for each
for num, file in enumerate(self.files):
# open the file
opened_file = self._open_data(file)
tsteps, tslices = self._handle_time(opened_file, file, (coords, coords))
results['datetime'] += list(tsteps)
slices[self.t_index] = tslices if self.t_var_in_dims else slices[self.t_index]
for var in self.variables:
# extract the appropriate values from the variable
vs = _array_by_eng(opened_file, var, tuple(slices))
if vs.ndim == 0:
if vs == self.fill_value:
vs = np.nan
results[var].append(vs)
elif vs.ndim == 1:
vs[vs == self.fill_value] = np.nan
for v in vs:
results[var].append(v)
else:
raise ValueError('Too many dimensions remain after slicing')
if self.engine != 'pygrib':
opened_file.close()
# return the data stored in a dataframe
return pd.DataFrame(results)
def multipoint(self,
*coords: list,
labels: list = None, ) -> pd.DataFrame:
"""
Extracts a time series at many points for a given series of coordinate values. Each point should have the same
time coordinate and different coordinates for each other dimension.
Args:
coords (int or float or None): a list of coordinate tuples or a 2D numpy array. Each coordinate pair in
the list should provide a coordinate value (integer or float) for each dimension of the array, e.g.
len(coordinate_pair) == len(dim_order). See TimeSeries.point for more explanation.
labels (list): an optional list of strings which label each of the coordinates provided. len(labels) should
be equal to len(coords)
Returns:
pandas.DataFrame with an index, a column named datetime, and a column named values.
"""
assert len(self.dim_order) == len(coords[0]), 'Specify 1 coordinate for each dimension of the array'
if labels is None:
labels = [f'point{i}' for i in range(len(coords))]
assert len(labels) == len(coords), 'You must provide a label for each point or use auto numbering'
datalabels = []
for label in labels:
for var in self.variables:
datalabels.append(f'{var}_{label}')
# make the return item
results = dict(datetime=[])
for datalabel in datalabels:
results[datalabel] = []
# map coordinates -> cell indices -> python slice() objects
slices = self._gen_dim_slices(coords, 'multipoint')
# iterate over each file extracting the value and time for each
for file in self.files:
opened_file = self._open_data(file)
tsteps, tslices = self._handle_time(opened_file, file, (coords[0], coords[0]))
results['datetime'] += list(tsteps)
for var in self.variables:
for i, slc in enumerate(slices):
slc[self.t_index] = tslices if self.t_var_in_dims else slc[self.t_index]
# extract the appropriate values from the variable
vs = _array_by_eng(opened_file, var, tuple(slc))
if vs.ndim == 0:
if vs == self.fill_value:
vs = np.nan
results[f'{var}_{labels[i]}'].append(vs)
elif vs.ndim == 1:
vs[vs == self.fill_value] = np.nan
for v in vs:
results[f'{var}_{labels[i]}'].append(v)
else:
raise ValueError('There are too many dimensions after slicing')
if self.engine != 'pygrib':
opened_file.close()
# return the data stored in a dataframe
return pd.DataFrame(results)
def bound(self,
min_coords: tuple,
max_coords: tuple,
stats: str or tuple = None, ) -> pd.DataFrame:
"""
Args:
min_coords (tuple): a tuple containing minimum coordinates of a bounding box range- coordinates given
in order of the dimensions of the source arrays.
max_coords (tuple): a tuple containing maximum coordinates of a bounding box range- coordinates given
in order of the dimensions of the source arrays.
stats (str or tuple): How to reduce arrays of values to a single value for the series. See class docstring.
Returns:
pandas.DataFrame with an index, a datetime column, and a column named for each statistic specified
"""
assert len(self.dim_order) == len(min_coords) == len(max_coords), \
'Specify 1 min and 1 max coordinate for each dimension'
# handle the optional arguments
self.stats = _gen_stat_list(stats) if stats is not None else self.stats
# make the return item
results = dict(datetime=[])
# add a list for each stat requested
for var in self.variables:
for stat in self.stats:
results[f'{var}_{stat}'] = []
# map coordinates -> cell indices -> python slice() objects
slices = self._gen_dim_slices((min_coords, max_coords), 'range')
# iterate over each file extracting the value and time for each
for file in self.files:
# open the file
opened_file = self._open_data(file)
tsteps, tslices = self._handle_time(opened_file, file, (min_coords, max_coords))
results['datetime'] += list(tsteps)
slices[self.t_index] = tslices if self.t_var_in_dims else slices[self.t_index]
for var in self.variables:
# slice the variable's array, returns array with shape corresponding to dimension order and size
vs = _array_by_eng(opened_file, var, tuple(slices))
vs[vs == self.fill_value] = np.nan
for stat in self.stats:
results[f'{var}_{stat}'] += _array_to_stat_list(vs, stat)
if self.engine != 'pygrib':
opened_file.close()
# return the data stored in a dataframe
return pd.DataFrame(results)  # api: pandas.DataFrame
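# Hedged usage sketch for the TimeSeries class documented above. The file paths,
# variable name and dimension names are placeholders for a hypothetical set of
# NetCDF files; only the call pattern is taken from the class docstring, and
# passing None is assumed to span the whole time axis.
import glob

demo_files = sorted(glob.glob('/data/netcdf/*.nc'))                 # hypothetical files
demo_series = TimeSeries(demo_files, var='T2M', dim_order=('time', 'lat', 'lon'),
                         interp_units=True, stats=('mean', 'max'))
point_df = demo_series.point(None, 40.0, -111.5)
box_df = demo_series.bound((None, 35.0, -115.0), (None, 45.0, -105.0))
print(point_df.head())
print(box_df.head())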
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 10:54:32 2019
@author: nmei
"""
import os
from glob import glob
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
import statsmodels.api as sm
from statsmodels.formula.api import ols
from shutil import copyfile
copyfile('../../../utils.py','utils.py')
import utils
from matplotlib.ticker import FormatStrFormatter
experiment = 'metasema'
here = 'encoding_model_15_ROIs'
working_dir = '../../../../results/{}/RP/{}'.format(experiment,here)
figure_dir = '../../../../figures/{}/RP/{}'.format(experiment,here)
if not os.path.exists(figure_dir):
os.mkdir(figure_dir)
working_data = glob(os.path.join(working_dir,'*.csv'))
df = pd.concat([pd.read_csv(f) for f in working_data]).groupby(
['sub_name',
'roi_name',
'model_name',
'condition',
]).mean().reset_index()
N = len(pd.unique(df['sub_name']))
alpha = 100
feature_dict = {'vgg19':'image2vec',
'densenet121':'image2vec',
'mobilenetv2_1':'image2vec',
'Fast_Text':'Word2vec',
'Glove':'Word2vec',
'Word2Vec':'Word2vec',}
df['feature_type'] = df['model_name'].map(feature_dict)
hue_order = ['vgg19', 'densenet121', 'mobilenetv2_1',
'Fast_Text', 'Glove', 'Word2Vec',
]
df = pd.concat([df[df['model_name'] == model_name] for model_name in hue_order])
temp = dict(
F = [],
        df_numerator = [],
df_denominator = [],
p = [],
feature_type = [],
condition = [],
roi_name = [],
)
for (feat,condition,roi),df_sub in df.groupby(['feature_type','condition','roi_name']):
anova = ols('mean_variance ~ model_name',data = df_sub).fit()
aov_table = sm.stats.anova_lm(anova,typ=2)
print(aov_table)
temp['F'].append(aov_table['F']['model_name'])
    temp['df_numerator'].append(aov_table['df']['model_name'])
temp['df_denominator'].append(aov_table['df']['Residual'])
temp['p'].append(aov_table['PR(>F)']['model_name'])
temp['feature_type'].append(feat)
temp['condition'].append(condition)
temp['roi_name'].append(roi)
anova_results = pd.DataFrame(temp)
temp = []
for condition, df_sub in anova_results.groupby('condition'):
df_sub = df_sub.sort_values('p')
converter = utils.MCPConverter(pvals = df_sub['p'].values)
d = converter.adjust_many()
df_sub['p_corrected'] = d['bonferroni'].values
temp.append(df_sub)
anova_results = pd.concat(temp)
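# Illustrative cross-check (commented out; utils.MCPConverter is assumed to implement
# the standard Bonferroni adjustment). The same per-condition correction could be
# reproduced for each group df_sub with statsmodels:
#
#     from statsmodels.stats.multitest import multipletests
#     _, p_bonf, _, _ = multipletests(df_sub['p'].values, method='bonferroni')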
anova_results['stars'] = anova_results['p_corrected'].apply(utils.stars)
anova_results = anova_results.sort_values(['roi_name','condition','feature_type'])
g = sns.catplot(x = 'roi_name',
y = 'mean_variance',
hue = 'model_name',
row = 'condition',
data = df,
kind = 'bar',
aspect = 4,
)
g._legend.set_title('Encoding Models')
(g.set_axis_labels("","Mean Variance Explained")
.set_titles("{row_name}")
.set(ylim=(0,0.05)))
g.axes[-1][0].set_xticklabels(g.axes[-1][0].xaxis.get_majorticklabels(),
rotation = 45,
horizontalalignment = 'center')
g.axes[0][0].set(title = 'Read')
g.axes[1][0].set(title = 'Think')
k = {'image2vec':-0.25,
'Word2vec':0.175}
j = 0.15
l = 0.0005
height = 0.045
for ax,condition in zip(g.axes.flatten(),['read','reenact']):
ax.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
df_sub = anova_results[anova_results['condition'] == condition]
for ii,((roi),df_star_sub) in enumerate(df_sub.groupby(['roi_name',])):
for model,df_star_sub_model in df_star_sub.groupby(['feature_type']):
ax.hlines(height,ii+k[model]-j,ii+k[model]+j)
ax.vlines(ii+k[model]-j,height+l,height-l)
ax.vlines(ii+k[model]+j,height+l,height-l)
ax.annotate(df_star_sub_model['stars'].values[0],xy=(ii+k[model]-0.25,height + 0.002))
g.fig.suptitle("Comparison between Computer Vision and Word Embedding Models\nAverage Variance Explained\nN = {}, {} (alpha = {})\nBonferroni corrected for multiple one-way ANOVAs".format(
N,"Ridge Regression",alpha),
y = 1.15)
g.savefig(os.path.join(figure_dir,
'mean variance explained.png'),
dpi = 400,
bbox_inches = 'tight')
results = []
for image in ['densenet121','mobilenetv2_1','vgg19',]:
for word in ['Fast_Text', 'Glove', 'Word2Vec',]:
df_image = df[df['model_name'] == image]
df_word = df[df['model_name'] == word]
df_image = df_image.sort_values(['sub_name','roi_name','condition'])
df_word = df_word.sort_values(['sub_name','roi_name','condition'])
MV_diff = df_image['mean_variance'].values - df_word['mean_variance'].values
BV_diff = df_image['best_variance'].values - df_word['best_variance'].values
df_diff = df_image.copy()
df_diff['mean_variance'] = MV_diff
df_diff['best_variance'] = BV_diff
df_diff['model_name'] = f"{image} - {word}"
results.append(df_diff)
df_diff = pd.concat(results)
g = sns.catplot(x = 'roi_name',
y = 'mean_variance',
hue = 'model_name',
row = 'condition',
data = df_diff,
kind = 'bar',
aspect = 4,
)
(g.set_axis_labels("", r"$\Delta$ Mean Variance Explained")
.set_titles("{row_name}"))
g.axes[-1][0].set_xticklabels(g.axes[-1][0].xaxis.get_majorticklabels(),
rotation = 45,
horizontalalignment = 'center')
g.axes[0][0].set(title = 'Read')
g.axes[1][0].set(title = 'Think')
g.fig.suptitle('Difference between Computer Vision Models and Word Embedding Models\nBonferroni corrected for multiple t-tests',
y=1.05)
g.savefig(os.path.join(figure_dir,
'difference of variance explained by image2vec and word2vec.png'),
dpi = 400,
bbox_inches = 'tight')
df_condition = dict(
roi = [],
condition = [],
ps_mean = [],
ps_std = [],
diff_mean = [],
diff_std = [],)
for (roi,condition),df_sub in df.groupby(['roi_name','condition']):
df_sub_img = df_sub[df_sub['feature_type'] == 'image2vec'].groupby(['sub_name']).mean().reset_index()
df_sub_word = df_sub[df_sub['feature_type'] == 'Word2vec'].groupby(['sub_name']).mean().reset_index()
a = df_sub_img['mean_variance'].values
b = df_sub_word['mean_variance'].values
ps = utils.resample_ttest_2sample(a,b,
n_ps = 100,
n_permutation = int(1e4),
one_tail = False,
match_sample_size = True)
df_condition['roi'].append(roi)
df_condition['condition'].append(condition)
df_condition['ps_mean'].append(ps.mean())
df_condition['ps_std'].append(ps.std())
df_condition['diff_mean'].append(np.mean(a - b))
df_condition['diff_std'].append(np.std(a - b))
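# For reference, a minimal two-sample permutation test of the mean difference
# (a sketch of what utils.resample_ttest_2sample is assumed to estimate; the exact
# resampling scheme in utils may differ), kept commented out:
#
#     pooled = np.concatenate([a, b])
#     null = np.array([np.mean(np.random.permutation(pooled)[:len(a)]) -
#                      np.mean(np.random.permutation(pooled)[len(a):])
#                      for _ in range(10000)])
#     p_two_tailed = np.mean(np.abs(null) >= abs(np.mean(a) - np.mean(b)))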
df_condition = pd.DataFrame(df_condition)
temp = []
for condition, df_sub in df_condition.groupby(['condition']):
df_sub = df_sub.sort_values(['ps_mean'])
converter = utils.MCPConverter(pvals = df_sub['ps_mean'].values)
d = converter.adjust_many()
df_sub['ps_corrected'] = d['bonferroni'].values
temp.append(df_sub)
df_condition = pd.concat(temp)
df_diff_diff = dict(
roi = [],
ps_mean = [],
ps_std = [],
diff_mean = [],
diff_std = [],)
for roi,df_sub in df_diff.groupby(['roi_name']):
df_read = df_sub[df_sub['condition'] == 'read']#.sort_values(['sub'])
df_reenact = df_sub[df_sub['condition'] == 'reenact']#.sort_values(['sub'])
df_read = df_read.sort_values(['sub_name','roi_name','feature_type']).reset_index()
df_reenact = df_reenact.sort_values(['sub_name','roi_name','feature_type']).reset_index()
a = df_read['mean_variance'].values
b = df_reenact['mean_variance'].values
ps = utils.resample_ttest_2sample(a,b,
n_ps = 100,
n_permutation = int(1e4),
one_tail = False,
match_sample_size = True)
df_diff_diff['roi'].append(roi)
df_diff_diff['ps_mean'].append(ps.mean())
df_diff_diff['ps_std'].append(ps.std())
df_diff_diff['diff_mean'].append(np.mean(np.abs(a - b)))
df_diff_diff['diff_std'].append(np.std(np.abs(a - b)))
df_diff_diff = pd.DataFrame(df_diff_diff)
df_diff_diff = df_diff_diff.sort_values(['ps_mean'])
converter = utils.MCPConverter(pvals = df_diff_diff['ps_mean'].values)
d = converter.adjust_many()
df_diff_diff['ps_corrected'] = d['bonferroni'].values
df_diff_diff['star'] = df_diff_diff['ps_corrected'].apply(utils.stars)
roi_order = pd.unique(df['roi_name'])
from datetime import datetime, timedelta, timezone
import random
import unittest
import pandas as pd
import pytz
if __name__ == "__main__":
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
from datatube.dtype import check_dtypes
class TestObj:
pass
unittest.TestCase.maxDiff = None
SIZE = 3
TEST_DATA = {
int: {
"integers":
[-1 * SIZE // 2 + i + 1 for i in range(SIZE)],
"whole floats":
[-1 * SIZE // 2 + i + 1.0 for i in range(SIZE)],
"real whole complex":
[complex(-1 * SIZE // 2 + i + 1, 0) for i in range(SIZE)],
},
float: {
"decimal floats":
[-1 * SIZE // 2 + i + 1 + random.random() for i in range(SIZE)],
"real decimal complex":
[complex(-1 * SIZE // 2 + i + 1 + random.random(), 0)
for i in range(SIZE)],
},
complex: {
"imaginary complex":
[complex(-1 * SIZE // 2 + i + 1 + random.random(),
-1 * SIZE // 2 + i + 1 + random.random())
for i in range(SIZE)],
},
str: {
"integer strings":
[str(-1 * SIZE // 2 + i + 1) for i in range(SIZE)],
"whole float strings":
[str(-1 * SIZE // 2 + i + 1.0) for i in range(SIZE)],
"decimal float strings":
[str(-1 * SIZE // 2 + i + 1 + random.random())
for i in range(SIZE)],
"real whole complex strings":
[str(complex(-1 * SIZE // 2 + i + 1, 0)) for i in range(SIZE)],
"real decimal complex strings":
[str(complex(-1 * SIZE // 2 + i + 1 + random.random(), 0))
for i in range(SIZE)],
"imaginary complex strings":
[str(complex(-1 * SIZE // 2 + i + 1 + random.random(),
-1 * SIZE // 2 + i + 1 + random.random()))
for i in range(SIZE)],
"character strings":
[chr(i % 26 + ord("a")) for i in range(SIZE)],
"boolean strings":
[str(bool((i + 1) % 2)) for i in range(SIZE)],
"aware datetime strings":
[str(datetime.fromtimestamp(i, tz=timezone.utc))
for i in range(SIZE)],
"aware ISO 8601 strings":
[datetime.fromtimestamp(i, tz=timezone.utc).isoformat()
for i in range(SIZE)],
"naive datetime strings":
[str(datetime.fromtimestamp(i)) for i in range(SIZE)],
"naive ISO 8601 strings":
[datetime.fromtimestamp(i).isoformat() for i in range(SIZE)],
"aware/naive datetime strings":
[str(datetime.fromtimestamp(i, tz=timezone.utc)) if i % 2
else str(datetime.fromtimestamp(i)) for i in range(SIZE)],
"aware/naive ISO 8601 strings":
[datetime.fromtimestamp(i, tz=timezone.utc).isoformat() if i % 2
else datetime.fromtimestamp(i).isoformat()
for i in range(SIZE)],
"mixed timezone datetime strings":
[str(
datetime.fromtimestamp(
i,
tz=pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
)
) for i in range(SIZE)],
"mixed timezone ISO 8601 strings":
[datetime.fromtimestamp(
i,
tz=pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
).isoformat() for i in range(SIZE)],
"timedelta strings":
[str(timedelta(seconds=i + 1)) for i in range(SIZE)],
"pd.Timedelta strings":
[str(pd.Timedelta(timedelta(seconds=i + 1))) for i in range(SIZE)]
},
bool: {
"booleans":
[bool((i + 1) % 2) for i in range(SIZE)]
},
datetime: {
"aware datetimes":
[datetime.fromtimestamp(i, tz=timezone.utc) for i in range(SIZE)],
"naive datetimes":
[datetime.fromtimestamp(i) for i in range(SIZE)],
"aware/naive datetimes":
[datetime.fromtimestamp(i, tz=timezone.utc) if i % 2
else datetime.fromtimestamp(i) for i in range(SIZE)],
"mixed timezone datetimes":
[datetime.fromtimestamp(
i,
tz = pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
) for i in range(SIZE)]
},
timedelta: {
"timedeltas":
[timedelta(seconds=i + 1) for i in range(SIZE)]
},
object: {
"Nones":
[None for _ in range(SIZE)],
"custom objects":
[TestObj() for _ in range(SIZE)]
}
}
ALL_DATA = {col_name: data for v in TEST_DATA.values()
for col_name, data in v.items()}
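# Interface assumed by the tests below (hedged sketch; the real implementation lives
# in datatube.dtype and may behave differently in edge cases):
#
#     check_dtypes(pd.Series([1, 2, 3]), int)                 # -> True
#     check_dtypes(pd.DataFrame({"a": [1.5]}), {"a": int})    # -> False
#
# i.e. it reports whether the observed values conform to the requested element
# type(s), with missing values ignored.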
class CheckDtypeTests(unittest.TestCase):
def test_check_integers_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, int)
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., int) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, int)
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., int) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: int})
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: int}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
        df = pd.DataFrame(with_na)
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic():
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
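    # The frame mixes datetime, timedelta and float columns; per-column
    # quantiles are always compared, and the frame-wide quantile is compared
    # according to the numeric_only parameter.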
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
    else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name is randomized per process, which can
    # occasionally make enc_with_name_arr and enc_arr identical, and there
    # is no reliable way to force a stable string hash. Use an integer name
    # instead so hash() returns a constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
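    # Round-trip pyarrow ChunkedArrays (and a Table built from two of them)
    # through cudf and compare against the pandas conversion.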
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
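    # Compare Series.digitize against np.digitize, passing the bins either as
    # a cudf Series or as a host array depending on series_bins.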
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
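    # head()/tail() should be equivalent to the corresponding positional
    # slices, both for the whole frame and for a single column.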
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
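    # set_index(inplace=True) returns None, so compare the mutated frames
    # instead of the returned values.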
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only supports ignore_index in sort_index from v1.0, so sort
    # without it here and reset the index manually below when
    # ignore_index=True
    expected = pdf.sort_index(
        axis=axis,
        level=level,
        ascending=ascending,
        inplace=inplace,
        na_position=na_position,
    )
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
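    # `decimals` may be a scalar, a dict, or a Series; a cudf Series is
    # converted to pandas so both libraries round with the same per-column
    # precision.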
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    # rounding must not change which rows are null
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
    # each column's series shares the frame's index, so their sizeofs match
    for c in gdf._data.names:
        assert gdf[c].index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
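    # When `err` is set the call is expected to raise in both libraries;
    # otherwise compare the membership masks, converting MultiIndex values
    # to cudf first.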
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
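            # note: "squences" below intentionally mirrors the typo in the
            # message raised by cuDF itself, since re.escape must match it
            # verbatim.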
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
        except ValueError as e:
            # pytest.xfail() accepts only a reason string, so gate on the
            # pandas version explicitly and re-raise any other failure rather
            # than falling through with `expected` unbound.
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
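# cuDF mirrors pandas' constructor protocol: _constructor returns the same
# class, _constructor_sliced maps DataFrame -> Series, and
# _constructor_expanddim maps Series -> DataFrame; the reverse directions
# (Series._constructor_sliced, DataFrame._constructor_expanddim) are expected
# to raise NotImplementedError, as asserted below.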
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
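# The astype tests below build the expected frame by casting each column with
# Series.astype and compare that against DataFrame.astype on the whole frame.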
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
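# Constructing a DataFrame with an explicit dtype should match building each
# column as a cudf.Series of that dtype, which is how `expect` is assembled.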
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
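# With skipna=False a null anywhere in a row propagates to that row's result,
# so the frame below (columns "c" and "d" are entirely NaN/null) is expected
# to produce an all-null result column.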
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
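# Row-wise min/max are also exercised for datetime columns; the frames below
# mix datetime resolutions (s/ms/us/ns) and, in some cases, int/float/bool
# columns.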
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
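    # here gdf.insert(-1, ...) appends at the end, which is why it is compared
    # against pdf.insert(len(pdf.columns), ...)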
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
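# DataFrame <op> Series broadcasts the Series across the frame's columns,
# aligning the Series index against the column labels (pandas' default
# axis="columns" behaviour), which is what the comparisons below rely on.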
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
        # Special case: a default RangeIndex is assumed to report zero memory
        # usage
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
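# Rough sketch of the xfail-marked expectation below: two 8-byte source
# columns (rows * 16), two 8-byte code arrays (rows * 16), and two levels of
# 3 unique 8-byte values each (3 * 8 apiece).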
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values on
    # alignment, so typecast both frames to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
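# Reminder of the semantics exercised above and below: where() keeps entries
# for which the condition is True and replaces the rest with `other`, while
# mask() is its complement, replacing entries where the condition is True.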
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
        expected = cudf.DataFrame(
            {
                "a": column.as_column(data, nan_as_null=nan_as_null),
                "b": column.as_column(data, nan_as_null=nan_as_null),
            }
        )
        expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
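    # Same-dtype astype: with copy=False the result may share memory with the
    # original, so the mutation below must affect (or not affect) gsr and psr
    # in the same way.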
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
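# Same as above, except the existing column is itself a scalar; pandas needs
# an explicit index in that case (hence "dummy_mandatory_index") so the scalar
# can be broadcast.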
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
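# The info() tests compare the exact text emitted by DataFrame.info against
# hard-coded strings, so whitespace, dtype listings and byte counts must match
# verbatim.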
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
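# With NaNs present, cupy.isclose treats NaN positions as equal only when
# equal_nan=True; the cuDF result is checked against that reference.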
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
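# Note: cudf.isclose aligns the two Series on their indexes, so labels present
# in only one index compare as False in the cases above.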
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
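# pandas only allows appending a Series when ignore_index=True is passed (or
# the Series has a name), which is why the calls below always set it.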
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # skipped because cudf doesn't currently support multiple columns with the same name
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
| pd.DataFrame(columns=["a", "b", "c", "d"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
#read data
dataset = pd.read_csv('raw_dataset.csv',engine='python')
#set variable
rs = np.random.RandomState(169)
outliers_fraction = 0.05
lendata = dataset.shape[0]
#label
anomaly = []
test_data = []
#normalize
nmlz_a = -1
nmlz_b = 1
def normalize(dataset,a,b):
scaler = MinMaxScaler(feature_range=(a, b))
normalize_data = scaler.fit_transform(dataset)
return normalize_data
#read dataset x,y
x = normalize(pd.DataFrame(dataset, columns=['cr']), nmlz_a, nmlz_b)
y = normalize(pd.DataFrame(dataset, columns=['wr']), nmlz_a, nmlz_b)
# print(x)
ifm = IsolationForest(n_estimators=100, verbose=2, n_jobs=2,
max_samples=lendata, random_state=rs, max_features=2)
if __name__ == '__main__':
Iso_train_dt = np.column_stack((x, y))
ifm.fit(Iso_train_dt)
scores_pred = ifm.decision_function(Iso_train_dt)
print(scores_pred)
threshold = stats.scoreatpercentile(scores_pred, 100 * outliers_fraction)
print(threshold)
xx, yy = np.meshgrid(np.linspace(nmlz_a, nmlz_b, 50), np.linspace(nmlz_a, nmlz_b, 50))
Z = ifm.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest ")
otl_proportion = int(outliers_fraction * len(dataset['Date']))
    plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, otl_proportion), cmap=plt.cm.hot)  # shade the outlier region (values from the minimum up to the threshold)
    a = plt.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red')  # draw the boundary between the outlier and inlier regions
plt.contourf(xx, yy, Z, levels=[threshold, Z.max()], colors='palevioletred')
    # shade the inlier region (values from the threshold up to the maximum)
for i in scores_pred:
if i <= threshold:
#print(i)
test_data.append(1)
anomaly.append(i)
else:
test_data.append(0)
ano_lable = np.column_stack(((dataset['Date'],dataset['data'],x,y,scores_pred, test_data)))
df = pd.DataFrame(data=ano_lable, columns=['Date','data','x', 'y', 'IsoFst_Score','label'])
b = plt.scatter(df['x'][df['label'] == 0], df['y'][df['label'] == 0], s=20, edgecolor='k',c='white')
c = plt.scatter(df['x'][df['label'] == 1], df['y'][df['label'] == 1], s=20, edgecolor='k',c='black')
plotlist = df.to_csv('processed_data.csv')
plt.axis('tight')
plt.xlim((nmlz_a, nmlz_b))
plt.ylim((nmlz_a, nmlz_b))
plt.legend([a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
loc="upper left")
    print("Isolation Forest threshold:", threshold)
    print("Total number of samples:", len(dataset))
    print("Number of detected anomalies:", len(anomaly))
# print(anomaly)
plt.show()
new_dataset = | pd.read_csv('processed_data.csv',engine='python') | pandas.read_csv |
from optparse import OptionParser
import os
import numpy as np
import pandas as pd
import get_site_features
import utils
np.set_printoptions(threshold=np.inf, linewidth=200)
pd.options.mode.chained_assignment = None
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--transcripts", dest="TRANSCRIPTS", help="transcript sequence information")
parser.add_option("--mir", dest="MIR", help="miRNA to get features for")
parser.add_option("--mirseqs", dest="MIR_SEQS", help="tsv with miRNAs and their sequences")
parser.add_option("--kds", dest="KDS", help="kd data in tsv format, this family must be included", default=None)
parser.add_option("--sa_bg", dest="SA_BG", help="SA background for 12mers")
parser.add_option("--rnaplfold_dir", dest="RNAPLFOLD_DIR", help="folder with RNAplfold info for transcripts")
parser.add_option("--pct_file", dest="PCT_FILE", default=None, help="file with PCT information")
parser.add_option("--kd_cutoff", dest="KD_CUTOFF", type=float, default=np.inf)
parser.add_option("--outfile", dest="OUTFILE", help="location to write outputs")
parser.add_option("--overlap_dist", dest="OVERLAP_DIST", help="minimum distance between neighboring sites", type=int)
parser.add_option("--upstream_limit", dest="UPSTREAM_LIMIT", help="how far upstream to look for 3p pairing", type=int)
parser.add_option("--only_canon", dest="ONLY_CANON", help="only use canonical sites", default=False, action='store_true')
(options, args) = parser.parse_args()
TRANSCRIPTS = pd.read_csv(options.TRANSCRIPTS, sep='\t', index_col='transcript')
mirseqs = pd.read_csv(options.MIR_SEQS, sep='\t', index_col='mir')
if '_pass' in options.MIR:
MIRSEQ = mirseqs.loc[options.MIR.replace('_pass', '')]['pass_seq']
FAMILY = mirseqs.loc[options.MIR.replace('_pass', '')]['pass_family']
else:
MIRSEQ = mirseqs.loc[options.MIR]['guide_seq']
FAMILY = mirseqs.loc[options.MIR]['guide_family']
SITE8 = utils.rev_comp(MIRSEQ[1:8]) + 'A'
print(options.MIR, SITE8)
# if KD file provided, find sites based on KD file
if options.KDS is not None:
KDS = pd.read_csv(options.KDS, sep='\t')
if options.ONLY_CANON:
KDS = KDS[KDS['aligned_stype'] != 'no site']
KDS = KDS[KDS['best_stype'] == KDS['aligned_stype']]
temp = KDS[KDS['mir'] == FAMILY]
if len(temp) == 0:
raise ValueError('{} not in kd files'.format(FAMILY))
mir_kd_dict = {x: y for (x, y) in zip(temp['12mer'], temp['log_kd']) if (y < options.KD_CUTOFF)}
# find all the sites and KDs
all_features = []
for row in TRANSCRIPTS.iterrows():
all_features.append(get_site_features.get_sites_from_kd_dict_improved(row[0], row[1]['orf_utr3'], mir_kd_dict, options.OVERLAP_DIST))
# otherwise, go by sequence
else:
all_features = []
for row in TRANSCRIPTS.iterrows():
all_features.append(get_site_features.get_sites_from_sequence(row[0], row[1]['orf_utr3'], SITE8,
overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON))
all_features = | pd.concat(all_features) | pandas.concat |
# Copyright 2017-2021 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import json
import sys
import re
import subprocess
import os
import math
import pandas as pd
import numpy as np
import hatchet.graphframe
from hatchet.node import Node
from hatchet.graph import Graph
from hatchet.frame import Frame
from hatchet.util.timer import Timer
from hatchet.util.executable import which
unknown_label_counter = 0
class CaliperReader:
"""Read in a Caliper file (`cali` or split JSON) or file-like object."""
def __init__(self, filename_or_stream, query=""):
"""Read from Caliper files (`cali` or split JSON).
Args:
filename_or_stream (str or file-like): name of a `cali` or
`cali-query` split JSON file, OR an open file object
query (str): cali-query arguments (for cali file)
"""
self.filename_or_stream = filename_or_stream
self.filename_ext = ""
self.query = query
self.json_data = {}
self.json_cols = {}
self.json_cols_mdata = {}
self.json_nodes = {}
self.metadata = {}
self.idx_to_label = {}
self.idx_to_node = {}
self.timer = Timer()
self.nid_col_name = "nid"
if isinstance(self.filename_or_stream, str):
_, self.filename_ext = os.path.splitext(filename_or_stream)
def read_json_sections(self):
# if cali-query exists, extract data from .cali to a file-like object
if self.filename_ext == ".cali":
cali_query = which("cali-query")
if not cali_query:
raise ValueError("from_caliper() needs cali-query to query .cali file")
cali_json = subprocess.Popen(
[cali_query, "-q", self.query, self.filename_or_stream],
stdout=subprocess.PIPE,
)
self.filename_or_stream = cali_json.stdout
# if filename_or_stream is a str, then open the file, otherwise
# directly load the file-like object
if isinstance(self.filename_or_stream, str):
with open(self.filename_or_stream) as cali_json:
json_obj = json.load(cali_json)
else:
json_obj = json.loads(self.filename_or_stream.read().decode("utf-8"))
# read various sections of the Caliper JSON file
self.json_data = json_obj["data"]
self.json_cols = json_obj["columns"]
self.json_cols_mdata = json_obj["column_metadata"]
self.json_nodes = json_obj["nodes"]
# read run metadata: all top-level elements in the json object that aren't
# one of the above sections are run metadata
skip = ["data", "columns", "column_metadata", "nodes"]
keys = [k for k in json_obj.keys() if k not in skip]
self.metadata = {k: json_obj[k] for k in keys}
# decide which column to use as the primary path hierarchy
# first preference to callpath if available
if "source.function#callpath.address" in self.json_cols:
self.path_col_name = "source.function#callpath.address"
self.node_type = "function"
if "path" in self.json_cols:
self.both_hierarchies = True
else:
self.both_hierarchies = False
elif "path" in self.json_cols:
self.path_col_name = "path"
self.node_type = "region"
self.both_hierarchies = False
else:
sys.exit("No hierarchy column in input file")
# remove data entries containing None in `path` column (null in json file)
# first, get column where `path` data is
# then, parse json_data list of lists to identify lists containing None in
# `path` column
path_col = self.json_cols.index(self.path_col_name)
entries_to_remove = []
for sublist in self.json_data:
if sublist[path_col] is None:
entries_to_remove.append(sublist)
# then, remove them from the json_data list
for i in entries_to_remove:
self.json_data.remove(i)
# change column names
for idx, item in enumerate(self.json_cols):
if item == self.path_col_name:
# this column is just a pointer into the nodes section
self.json_cols[idx] = self.nid_col_name
# make other columns consistent with other readers
if item == "mpi.rank":
self.json_cols[idx] = "rank"
if item == "module#cali.sampler.pc":
self.json_cols[idx] = "module"
if item == "sum#time.duration" or item == "sum#avg#sum#time.duration":
self.json_cols[idx] = "time"
if (
item == "inclusive#sum#time.duration"
or item == "sum#avg#inclusive#sum#time.duration"
):
self.json_cols[idx] = "time (inc)"
# make list of metric columns
self.metric_columns = []
for idx, item in enumerate(self.json_cols_mdata):
if self.json_cols[idx] != "rank" and item["is_value"] is True:
self.metric_columns.append(self.json_cols[idx])
def create_graph(self):
list_roots = []
global unknown_label_counter
# find nodes in the nodes section that represent the path hierarchy
for idx, node in enumerate(self.json_nodes):
node_label = node["label"]
if node_label == "":
node_label = "UNKNOWN " + str(unknown_label_counter)
unknown_label_counter += 1
self.idx_to_label[idx] = node_label
if node["column"] == self.path_col_name:
if "parent" not in node:
# since this node does not have a parent, this is a root
graph_root = Node(
Frame({"type": self.node_type, "name": node_label}), None
)
list_roots.append(graph_root)
node_dict = {
self.nid_col_name: idx,
"name": node_label,
"node": graph_root,
}
self.idx_to_node[idx] = node_dict
else:
parent_hnode = (self.idx_to_node[node["parent"]])["node"]
hnode = Node(
Frame({"type": self.node_type, "name": node_label}),
parent_hnode,
)
parent_hnode.add_child(hnode)
node_dict = {
self.nid_col_name: idx,
"name": node_label,
"node": hnode,
}
self.idx_to_node[idx] = node_dict
return list_roots
def read(self):
"""Read the caliper JSON file to extract the calling context tree."""
with self.timer.phase("read json"):
self.read_json_sections()
with self.timer.phase("graph construction"):
list_roots = self.create_graph()
# create a dataframe of metrics from the data section
self.df_json_data = pd.DataFrame(self.json_data, columns=self.json_cols)
# when an nid has multiple entries due to the secondary hierarchy
# we need to aggregate them for each (nid, rank)
groupby_cols = [self.nid_col_name]
if "rank" in self.json_cols:
groupby_cols.append("rank")
if "sourceloc#cali.sampler.pc" in self.json_cols:
groupby_cols.append("sourceloc#cali.sampler.pc")
if self.both_hierarchies is True:
# create dict that stores aggregation function for each column
agg_dict = {}
for idx, item in enumerate(self.json_cols_mdata):
col = self.json_cols[idx]
if col != "rank" and col != "nid":
if item["is_value"] is True:
agg_dict[col] = np.sum
else:
agg_dict[col] = lambda x: x.iloc[0]
grouped = self.df_json_data.groupby(groupby_cols).aggregate(agg_dict)
self.df_json_data = grouped.reset_index()
# map non-numeric columns to their mappings in the nodes section
for idx, item in enumerate(self.json_cols_mdata):
if item["is_value"] is False and self.json_cols[idx] != self.nid_col_name:
if self.json_cols[idx] == "sourceloc#cali.sampler.pc":
# split source file and line number into two columns
self.df_json_data["file"] = self.df_json_data[
self.json_cols[idx]
].apply(
lambda x: re.match(
r"(.*):(\d+)", self.json_nodes[x]["label"]
).group(1)
)
self.df_json_data["line"] = self.df_json_data[
self.json_cols[idx]
].apply(
lambda x: re.match(
r"(.*):(\d+)", self.json_nodes[x]["label"]
).group(2)
)
self.df_json_data.drop(self.json_cols[idx], axis=1, inplace=True)
sourceloc_idx = idx
elif self.json_cols[idx] == "path":
# we will only reach here if path is the "secondary"
# hierarchy in the data
self.df_json_data["path"] = self.df_json_data["path"].apply(
lambda x: None
if (math.isnan(x))
else self.json_nodes[int(x)]["label"]
)
else:
self.df_json_data[self.json_cols[idx]] = self.df_json_data[
self.json_cols[idx]
].apply(lambda x: self.json_nodes[x]["label"])
# since we split sourceloc, we should update json_cols and
# json_cols_mdata
if "sourceloc#cali.sampler.pc" in self.json_cols:
self.json_cols.pop(sourceloc_idx)
self.json_cols_mdata.pop(sourceloc_idx)
self.json_cols.append("file")
self.json_cols.append("line")
self.json_cols_mdata.append({"is_value": False})
self.json_cols_mdata.append({"is_value": False})
max_nid = self.df_json_data[self.nid_col_name].max()
if "line" in self.df_json_data.columns:
# split nodes that have multiple file:line numbers to have a child
# each with a unique file:line number
unique_nodes = self.df_json_data.groupby(self.nid_col_name)
df_concat = [self.df_json_data]
for nid, super_node in unique_nodes:
line_groups = super_node.groupby("line")
# only need to do something if there are more than one
# file:line number entries for the node
if len(line_groups.size()) > 1:
sn_hnode = self.idx_to_node[nid]["node"]
for line, line_group in line_groups:
# create the node label
file_path = (line_group.head(1))["file"].item()
file_name = os.path.basename(file_path)
node_label = file_name + ":" + line
# create a new hatchet node
max_nid += 1
idx = max_nid
hnode = Node(
Frame(
{"type": "statement", "file": file_path, "line": line}
),
sn_hnode,
)
sn_hnode.add_child(hnode)
node_dict = {
self.nid_col_name: idx,
"name": node_label,
"node": hnode,
}
self.idx_to_node[idx] = node_dict
# change nid of the original node to new node in place
for index, row in line_group.iterrows():
self.df_json_data.loc[index, "nid"] = max_nid
# add new row for original node
node_copy = super_node.head(1).copy()
for cols in self.metric_columns:
node_copy[cols] = 0
df_concat.append(node_copy)
# concatenate all the newly created dataframes with
# self.df_json_data
self.df_fixed_data = pd.concat(df_concat)
else:
self.df_fixed_data = self.df_json_data
# create a dataframe with all nodes in the call graph
self.df_nodes = pd.DataFrame.from_dict(data=list(self.idx_to_node.values()))
# add missing intermediate nodes to the df_fixed_data dataframe
if "rank" in self.json_cols:
self.num_ranks = self.df_fixed_data["rank"].max() + 1
rank_list = range(0, self.num_ranks)
# create a standard dict to be used for filling all missing rows
default_metric_dict = {}
for idx, item in enumerate(self.json_cols_mdata):
if self.json_cols[idx] != self.nid_col_name:
if item["is_value"] is True:
default_metric_dict[self.json_cols[idx]] = 0
else:
default_metric_dict[self.json_cols[idx]] = None
# create a list of dicts, one dict for each missing row
missing_nodes = []
for iteridx, row in self.df_nodes.iterrows():
# check if df_nodes row exists in df_fixed_data
metric_rows = self.df_fixed_data.loc[
self.df_fixed_data[self.nid_col_name] == row[self.nid_col_name]
]
if "rank" not in self.json_cols:
if metric_rows.empty:
# add a single row
node_dict = dict(default_metric_dict)
node_dict[self.nid_col_name] = row[self.nid_col_name]
missing_nodes.append(node_dict)
else:
if metric_rows.empty:
# add a row per MPI rank
for rank in rank_list:
node_dict = dict(default_metric_dict)
node_dict[self.nid_col_name] = row[self.nid_col_name]
node_dict["rank"] = rank
missing_nodes.append(node_dict)
elif len(metric_rows) < self.num_ranks:
# add a row for each missing MPI rank
present_ranks = metric_rows["rank"].values
missing_ranks = [x for x in rank_list if x not in present_ranks]
for rank in missing_ranks:
node_dict = dict(default_metric_dict)
node_dict[self.nid_col_name] = row[self.nid_col_name]
node_dict["rank"] = rank
missing_nodes.append(node_dict)
self.df_missing = | pd.DataFrame.from_dict(data=missing_nodes) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
def get_min_max(x):
return pd.Series(index=['min','max'],data=[x.min(),x.max()])
def conditional_sum(x):
d = {}
try:
x = x.loc[x['CX01_CONV1_BLOW_FERCENT'] <= x['Dy. 시점'].unique()[0]]
# print('CH_NO : {}, Dy. 시점 : {}'.format(x['CHARGE_NO'].unique()[0],x['Dy. 시점'].unique()[0]))
except:
pass
columns = ['CX01_CONV1_CO_GAS', 'CX01_CONV1_CO2_GAS', 'CX01_CONV1_O2_GAS', 'CX01_CONV1_N2_FLOW', 'Dy. 시점']
for col in columns:
if col == 'Dy. 시점':
d[col] = x[col].iloc[0]
else:
d[col] = x[col].sum()
return | pd.Series(d, index=columns) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 11 14:17:50 2022
@author: m102324
"""
import sys
import logging
import pandas as pd
from cobindability.BED import bed_overlap_size, bed_to_list, bed_info
from cobindability.coefcal import ov_coef, ov_jaccard, ov_ss, ov_sd, pmi_value, npmi_value
from cobindability import version
__author__ = "<NAME>"
__copyright__ = "Copyleft"
__credits__ = []
__license__ = "MIT"
__version__ = version.version
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def ov_stats(file1, file2, bg_size = 1400000000):
"""
Parameters
----------
file1 : str
Genomic regions in BED (Browser Extensible Data, https://genome.ucsc.edu/FAQ/FAQformat.html#format1), BED-like or BigBed format.
The BED-like format includes 'bed3','bed4','bed6','bed12','bedgraph','narrowpeak', 'broadpeak','gappedpeak'. BED and BED-like
format can be plain text, compressed (.gz, .z, .bz, .bz2, .bzip2) or remote (http://, https://, ftp://) files. Do not compress
BigBed foramt. BigBed file can also be a remote file.
file2 : str
Genomic regions in BED (Browser Extensible Data, https://genome.ucsc.edu/FAQ/FAQformat.html#format1), BED-like or BigBed format.
The BED-like format includes 'bed3','bed4','bed6','bed12','bedgraph','narrowpeak', 'broadpeak','gappedpeak'. BED and BED-like
format can be plain text, compressed (.gz, .z, .bz, .bz2, .bzip2) or remote (http://, https://, ftp://) files. Do not compress
BigBed foramt. BigBed file can also be a remote file.
bg_size : int, optional
        The effective background genome size (roughly 1.4 Gb for the human genome). The default is 1400000000.
Returns
-------
pandas.Series
        Pandas Series with interval statistics for each BED file (counts and sizes),
        overlap sizes, overlap coefficients ('coef.Overlap', 'coef.Jaccard',
        'coef.Dice', 'coef.SS') and pointwise mutual information values
        ('A_and_B.PMI', 'A_and_B.NPMI').
"""
results = {}
file1_lst = bed_to_list (file1)
file2_lst = bed_to_list (file2)
logging.info("Gathering information for \"%s\" ..." % file1)
info1 = bed_info(file1)
results['A.name'] = info1['Name']
results['A.interval_count'] = info1['Count']
results['A.interval_total_size'] = info1['Total_size']
#results['A.interval_uniq_size'] = info1['Genomic_size']
results['A.interval_mean_size'] = info1['Mean_size']
results['A.interval_median_size'] = info1['Median_size']
results['A.interval_min_size'] = info1['Min_size']
results['A.interval_max_size'] = info1['Max_size']
results['A.interval_size_SD'] = info1['STD']
uniqBase1 = info1['Genomic_size']
logging.info("Gathering information for \"%s\" ..." % file2)
info2 = bed_info(file2)
results['B.name'] = info2['Name']
results['B.interval_count'] = info2['Count']
results['B.interval_total_size'] = info2['Total_size']
#results['B.interval_uniq_size'] = info2['Genomic_size']
results['B.interval_mean_size'] = info2['Mean_size']
results['B.interval_median_size'] = info2['Median_size']
results['B.interval_min_size'] = info2['Min_size']
results['B.interval_max_size'] = info2['Max_size']
results['B.interval_size_SD'] = info2['STD']
uniqBase2 = info2['Genomic_size']
#calculate overall overlap coef
logging.debug("Calculating overlapped bases ...")
overlapBases = bed_overlap_size(file1_lst, file2_lst)
results['G.size'] = bg_size
results['A.size'] = uniqBase1
results['Not_A.size'] = bg_size - uniqBase1
results['B.size'] = uniqBase2
results['Not_B.size'] = bg_size - uniqBase2
results['A_not_B.size'] = uniqBase1 - overlapBases
results['B_not_A.size'] = uniqBase2 - overlapBases
results['A_and_B.size'] = overlapBases
results['A_and_B.exp_size'] = uniqBase1 * uniqBase2/bg_size
results['A_or_B.size'] = uniqBase1 + uniqBase2 -overlapBases
results['Neither_A_nor_B.size'] = bg_size - uniqBase1 - uniqBase2 + overlapBases
results['coef.Overlap'] = ov_coef(uniqBase1, uniqBase2, overlapBases, bg_size)
results['coef.Jaccard'] = ov_jaccard(uniqBase1, uniqBase2, overlapBases, bg_size)
results['coef.Dice'] = ov_sd(uniqBase1, uniqBase2, overlapBases, bg_size)
results['coef.SS'] = ov_ss(uniqBase1, uniqBase2, overlapBases, bg_size)
results['A_and_B.PMI'] = pmi_value(uniqBase1, uniqBase2, overlapBases, bg_size)
results['A_and_B.NPMI'] = npmi_value(uniqBase1, uniqBase2, overlapBases, bg_size)
return pd.Series(data=results)
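# Usage sketch (hypothetical, not part of the original module): the BED file
# names below are illustrative placeholders for two peak sets on disk.
#
#   stats = ov_stats('tf_a_peaks.bed', 'tf_b_peaks.bed', bg_size=1400000000)
#   print(stats[['coef.Overlap', 'coef.Jaccard', 'A_and_B.NPMI']])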
if __name__ == '__main__':
| pd.set_option('display.float_format', lambda x: '%.4f' % x) | pandas.set_option |
""" Parsing source data into simple tsv datasets.
To parse Bgl3 and GB1, ENRICH2 MUST BE INSTALLED IN A SEPARATE CONDA ENVIRONMENT NAMED 'enrich2' """
from os.path import isfile, join
import collections
import numpy as np
import pandas as pd
import enrich2
import utils
def parse_avgfp():
""" create the gfp dataset from raw source data """
source_fn = "source_data/avgfp/amino_acid_genotypes_to_brightness.tsv"
out_fn = "data/avgfp/avgfp.tsv"
if isfile(out_fn):
print("err: parsed avgfp dataset already exists: {}".format(out_fn))
return
# load the source data
data = pd.read_csv(source_fn, sep="\t")
# remove the wild-type entry
data = data.loc[1:]
# create columns for variants, number of mutations, and score
variants = data["aaMutations"].apply(lambda x: ",".join([x[1:] for x in x.split(":")]))
num_mutations = variants.apply(lambda x: len(x.split(",")))
score = data["medianBrightness"]
# create the dataframe
cols = ["variant", "num_mutations", "score"]
data_dict = {"variant": variants.values, "num_mutations": num_mutations.values, "score": score.values}
df = pd.DataFrame(data_dict, columns=cols)
# now add a normalized score column - these scores have the wild-type score subtracted from them
df["score_wt_norm"] = df["score"].apply(lambda x: x - 3.7192121319)
df.to_csv(out_fn, sep="\t", index=False)
def filter_dataset(df, threshold):
""" filter out variants that do not meet the required threshold for number of reads """
df = df[(df["inp"] + df["sel"]) >= threshold]
return df
def parse_bgl3_variant_list(ml, col_name):
""" creates a dataframe from the given list of variants """
# filter wild-type counts out, add to dataframe at the end
ml_no_wt = []
wt = []
for variant in ml:
if variant.startswith("WTcount"):
wt.append(int(variant.split(",")[-1].strip()))
else:
ml_no_wt.append(variant)
count_dict = collections.Counter(ml_no_wt)
frame = pd.DataFrame(index=count_dict.keys(), data=count_dict.values())
frame.columns = [col_name]
    # add wild-type counts back into the dataframe
frame.loc["_wt"] = sum(wt)
return frame
def get_bgl3_count_df(output_dir=None):
""" combines the inp and sel variant lists into a single dataframe with counts """
inp_fn = "source_data/bgl3/unlabeled_Bgl3_mutations.txt"
sel_fn = "source_data/bgl3/positive_Bgl3_mutations.txt"
cache_fn = "bgl3_raw_counts.tsv"
if output_dir is None or not isfile(join(output_dir, cache_fn)):
print("Computing bgl3 count df from raw counts")
inp_variant_list = utils.load_lines(inp_fn)
sel_variant_list = utils.load_lines(sel_fn)
df = pd.concat([parse_bgl3_variant_list(inp_variant_list, "inp"),
parse_bgl3_variant_list(sel_variant_list, "sel")], axis=1, sort=True).fillna(0)
if output_dir is not None:
df.to_csv(join(output_dir, cache_fn), sep="\t")
return df
print("Loading cached count df from file: {}".format(join(output_dir, cache_fn)))
return pd.read_csv(join(output_dir, cache_fn), sep="\t", index_col=0)
def parse_bgl3():
""" create the bgl3 dataset from raw source data """
out_dir = "data/bgl3"
out_fn = "bgl3.tsv"
if isfile(join(out_dir, out_fn)):
print("err: parsed bgl3 dataset already exists: {}".format(join(out_dir, out_fn)))
return
# creates a single dataframe with counts from the given mutations lists
df = get_bgl3_count_df(output_dir=out_dir)
# filter the variants based on count threshold
threshold = 5
df = filter_dataset(df, threshold=threshold)
enrich2.create_e2_dataset(df, output_dir=out_dir, output_fn=out_fn)
def get_gb1_count_df(output_dir=None):
""" creates a single dataframe with raw counts for all gb1 variants """
cache_fn = "gb1_raw_counts.tsv"
if output_dir is None or not isfile(join(output_dir, cache_fn)):
print("Computing gb1 count df from raw counts")
single = pd.read_csv("source_data/gb1/single_mutants.csv")
double = pd.read_csv("source_data/gb1/double_mutants.csv")
wt = pd.read_csv("source_data/gb1/wild_type.csv")
# change position to a 0-index instead of the current 1-index
single["Position"] = single["Position"] - 1
double["Mut1 Position"] = double["Mut1 Position"] - 1
double["Mut2 Position"] = double["Mut2 Position"] - 1
single_strings = single.apply(lambda row: "".join(map(str, row[0:3])), axis=1)
double_strings = double.apply(lambda row: "{}{}{},{}{}{}".format(*row[0:6]), axis=1)
wt_string = | pd.Series(["_wt"]) | pandas.Series |
import matplotlib.pyplot as pl
import anndata as ad
import pandas as pd
import numpy as np
import scanpy as sc
import scvelo as scv
from scipy.sparse import issparse
import matplotlib.gridspec as gridspec
from scipy.stats import gaussian_kde, spearmanr, pearsonr
from goatools.obo_parser import GODag
from goatools.anno.genetogo_reader import Gene2GoReader
from goatools.goea.go_enrichment_ns import GOEnrichmentStudyNS
import seaborn as sns
import re
import os
import gzip
import mygene
from csv import Sniffer
signatures_path_ = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'metadata/')
def get_genefamily_percentage(adata, key='MT-', start=True, name='mito'):
keys = key if isinstance(key, list) else [key, '____ignore____']
if start:
family_genes = np.logical_or(*[adata.var_names.str.startswith(k) for k in keys])
else:
family_genes = np.logical_or(*[adata.var_names.str.endswith(k) for k in keys])
if issparse(adata.X):
adata.obs['percent_'+name] = np.sum(
adata[:, family_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
else:
adata.obs['percent_'+name] = np.sum(
adata[:, family_genes].X, axis=1) / np.sum(adata.X, axis=1)
def get_mito_percentage(adata, species='human'):
key = 'MT-' if species == 'human' else 'mt-'
get_genefamily_percentage(adata, key=key, start=True, name='mito')
def get_ribo_percentage(adata, species='human'):
key = specify_genes(['RPS', 'RPL'], species=species)
get_genefamily_percentage(adata, key=key, start=True, name='ribo')
def get_hemo_percentage(adata, species='human'):
key = specify_genes(['HBA', 'HBB'], species=species)
get_genefamily_percentage(adata, key=key, start=True, name='hemo')
def score_cell_cycle(adata, signatures_path=signatures_path_, species='human'):
adatas = adata if isinstance(adata, list) else [adata]
for i in range(len(adatas)):
adata = adatas[i]
# score cell cycle
# cc score with genes from Kowalczyk, <NAME>., et al. “Single-Cell RNA-Seq Reveals Changes in Cell Cycle and Differentiation Programs upon Aging of Hematopoietic Stem Cells.” Genome Research, vol. 25, no. 12, 2015, pp. 1860–72, doi:10.1101/gr.192237.115.
cell_cycle_genes = [x.strip() for x in open(signatures_path+'/regev_lab_cell_cycle_genes.txt')]
cell_cycle_genes = [x for x in cell_cycle_genes if x in adata.var_names]
# Split into 2 lists
s_genes = cell_cycle_genes[:43]
g2m_genes = cell_cycle_genes[43:]
# score
sc.tl.score_genes_cell_cycle(adata, s_genes=s_genes, g2m_genes=g2m_genes)
adatas[i] = adata
return adatas[0] if len(adatas)==1 else adatas
def score_smillie_str_epi_imm(adata, signatures_path=signatures_path_, species='human'):
tab=pd.read_excel(signatures_path+'/colonoid_cancer_uhlitz_markers_revised.xlsx', skiprows=1, index_col=0)
score_genes(adata, np.array(tab.index[tab['Epithelial']==1].values, dtype='str'), score_name='epi_score', species=species)
score_genes(adata, np.array(tab.index[tab['Stromal']==1].values, dtype='str'), score_name='str_score', species=species)
score_genes(adata, np.array(tab.index[tab['Immune']==1].values, dtype='str'), score_name='imm_score', species=species)
def score_tumor_immune_cells(adata, signatures_path=signatures_path_, species='human'):
# ImSigGenes immune tumor signatures
tab=pd.read_excel(signatures_path+'/ImSigGenes_immunetumor.xlsx', skiprows=2, index_col=1)
annot = dict()
for ct in pd.unique(tab.Signature):
annot[ct] = tab[tab.Signature==ct].index.values
for ct in annot.keys():
score_genes(adata, annot[ct], score_name=ct, species=species)
def calc_qc_scvelo(adata):
adatas = adata if isinstance(adata, list) else [adata]
for adata in adatas:
# obs qc
adata.obs['ucounts'] = rsum(adata.layers['unspliced'], axis=1)
adata.obs['scounts'] = rsum(adata.layers['spliced'], axis=1)
adata.obs['ufeatures'] = rsum(adata.layers['unspliced']>0, axis=1)
adata.obs['sfeatures'] = rsum(adata.layers['spliced']>0, axis=1)
# var qc
adata.var['ucounts'] = rsum(adata.layers['unspliced'], axis=0)
adata.var['scounts'] = rsum(adata.layers['spliced'], axis=0)
adata.var['ucells'] = rsum(adata.layers['unspliced']>0, axis=0)
adata.var['scells'] = rsum(adata.layers['spliced']>0, axis=0)
def calc_qc(adata, extended_genesets=False, species='detect'):
adatas = adata if isinstance(adata, list) else [adata]
for adata in adatas:
# qc counts
adata.obs['ncounts'] = rsum(adata.X, axis=1)
adata.obs['ngenes'] = rsum(adata.X>0, axis=1)
adata.var['ncounts'] = rsum(adata.X, axis=0)
adata.var['ncells'] = rsum(adata.X>0, axis=0)
species = detect_organism(adata) if species == 'detect' else species
# gene modules
# mitochondrial genes
get_mito_percentage(adata, species)
# ribosomal genes
get_ribo_percentage(adata, species)
# hemoglobin genes
get_hemo_percentage(adata, species)
if extended_genesets:
            if species != 'human':
                raise ValueError(species + ' species is not known. Please do not use extended_genesets=True.')
# interferon genes, immune response
get_genefamily_percentage(adata, key='IFIT', start=True, name='ifit')
# Cell adhesion molecules genes
get_genefamily_percentage(adata, key='CAM', start=False, name='cam')
# HLA genes encode MHC I and MHC II
get_genefamily_percentage(adata, key='HLA-', start=True, name='hla') # genome specific sometimes!!!
# S100 genes, saw them often in organoids
get_genefamily_percentage(adata, key='S100', start=True, name='s100')
# FOX genes, TFs
get_genefamily_percentage(adata, key='FOX', start=True, name='fox')
# Heat shock protein genes
get_genefamily_percentage(adata, key='HSP', start=True, name='heatshock')
# ABC transporter genes, can lead to multi-drug resistance in cancer
get_genefamily_percentage(adata, key='ABC', start=True, name='abc')
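# Usage sketch (hypothetical): assumes `adata` is an AnnData object with human
# gene symbols in adata.var_names; QC columns such as 'ncounts', 'percent_mito'
# and 'percent_ribo' are written into adata.obs / adata.var.
#
#   calc_qc(adata, extended_genesets=True, species='human')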
def specify_genes(genes, species='human'):
genes = genes if isinstance(genes, list) else list(genes) if isinstance(genes, np.ndarray) else [genes]
    if species == 'human':
return [x.upper() for x in genes]
    elif species == 'mouse':
return [x.capitalize() for x in genes]
else:
raise ValueError('Species '+species+' not known.')
def score_genes(adata, gene_list, score_name, species='human', **kwargs):
gene_list_ = specify_genes(gene_list, species=species)
sc.tl.score_genes(adata, gene_list_, score_name=score_name)
def score_hallmarks(adata, subset='organoid', signatures_path=signatures_path_, species='human'):
sc.settings.verbosity = 0
    # subset can be a list of hallmarks, 'organoid' (3 scores), 'CRC' (~18 scores) or 'all' (50 scores)
tab = pd.read_csv(signatures_path + 'h.all.v6.2.symbols.gmt', sep='\t', index_col=0, header=None).drop(1, axis=1).T
hallsigs={hallmark : tab[hallmark][~pd.isna(tab[hallmark])].values for hallmark in tab.columns}
if isinstance(subset, list):
selection = subset
elif subset == 'organoid':
selection = ['HALLMARK_DNA_REPAIR', 'HALLMARK_WNT_BETA_CATENIN_SIGNALING', 'HALLMARK_APOPTOSIS']
elif subset == 'CRC': # TODO this list is bugged, some entries do not exist
selection = ['HALLMARK_DNA_REPAIR', 'HALLMARK_WNT_BETA_CATENIN_SIGNALING', 'HALLMARK_APOPTOSIS',
'HALLMARK_NOTCH_SIGNALING', 'HALLMARK_TNFA_SIGNALING_VIA_NFKB', 'HALLMARK_HYPOXIA', 'HALLMARK_TGF_BETA_SIGNALING',
            'HALLMARK_MITOTIC_SPINDLE', 'HALLMARK_MTORC1_SIGNALING', 'HALLMARK_PI3K_AKT_MTOR_SIGNALING', 'HALLMARK_PROTEIN_SECRETION',
'HALLMARK_G2M_CHECKPOINT', 'HALLMARK_EPITHELIAL_MESENCHYMAL_TRANSITION', 'HALLMARK_OXIDATIVE_PHOSPHORYLATION',
'HALLMARK_P53_PATHWAY', 'HALLMARK_ANGIOGENESIS', 'HALLMARK_KRAS_SIGNALING_UP', 'HALLMARK_KRAS_SIGNALING_DN',
'HALLMARK_GLYCOLYSIS']
elif subset == 'all':
selection = hallsigs.keys()
else:
        raise ValueError('Please select a valid subset of hallmarks to use. You can also choose "all".')
for hm in selection:
score_genes(adata, hallsigs[hm], score_name=hm, species=species)
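# Usage sketch (hypothetical): assumes `adata` is an AnnData object and the
# MSigDB hallmark .gmt file is present in the signatures folder; each requested
# hallmark becomes a score column in adata.obs.
#
#   score_hallmarks(adata, subset=['HALLMARK_APOPTOSIS', 'HALLMARK_DNA_REPAIR'])
#   adata.obs[['HALLMARK_APOPTOSIS', 'HALLMARK_DNA_REPAIR']].describe()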
def lin_corr_adata(adata, x, keys, method='spearman'):
"""Linearly correlates features (genes/obs_keys) of adata with a given array.
    Computes Spearman or Pearson correlation (r and p value) for each selected
    feature with the given values in x.
    Parameters
    ----------
adata: An adata object.
x: numeric numpy array
For example x = adata.obsm['X_diffmap'][:,1] or another gene's
expression.
keys: Either a list of genes or a list of adata.obs.columns.
method: Either 'spearman' or 'pearson'.
Returns
-------
df: A pandas DataFrame
The dataframe has genes as index and columns pearson_r and pearson_p. It
is sorted by correlation coefficient (pearson_r).
"""
# input keys may be list or str, make list
keys = [keys] if isinstance(keys, str) else keys
# select correlation method
if method == 'spearman':
correlate = spearmanr
    elif method == 'pearson':
correlate = pearsonr
else:
raise ValueError(f'Method {method} not valid (pearson or spearman only).')
# feature set
if all(np.isin(keys, adata.obs.columns)):
feature_type = 'obs_keys'
Y = adata.obs[keys].values
elif any(np.isin(keys, adata.var_names)):
feature_type = 'genes'
        Y = adata[:, keys].X.A if issparse(adata.X) else adata[:, keys].X  # subset to the requested genes so column i matches keys[i]
else:
raise ValueError('Keys must be list of genes or adata.obs keys.')
# linearly correlated
lincors = []
for i, key in enumerate(keys):
y = Y[:, i]
r, p = correlate(x, y)
lincors.append([key, r, p])
# format result as pandas.DataFrame
df = pd.DataFrame(lincors, columns=[feature_type, f'{method}_r', f'{method}_p']).set_index(feature_type)
df = df.sort_values(f'{method}_r', ascending=False) # sort by correlation
return df
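# Usage sketch (hypothetical): the gene names below are illustrative and must
# exist in adata.var_names; adata also needs a precomputed diffusion map.
#
#   dc1 = adata.obsm['X_diffmap'][:, 1]
#   df_corr = lin_corr_adata(adata, dc1, ['LGR5', 'KRT20'], method='spearman')
#   print(df_corr.head())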
def kde_trajectory(adata, key, groupby, velocity=False, rug_keys=[], component=1,
figsize=[15,5], n_convolve=10, ax=None, show=True, n_eval=200,
range_percs=[0,100], linewidth=4, rug_alpha=0.1,
n=19, ylim=30, scale=8):
X = adata.obsm['X_'+key] if 'X_'+key in adata.obsm.keys() else adata.obsm[key] if key in adata.obsm.keys() else adata.obs[key]
X = X[:, component] if len(X.shape)>1 else X
if velocity:
V = adata.obsm['velocity_'+key] if 'velocity_'+key in adata.obsm.keys() else adata.obsm[key] if key in adata.obsm.keys() else adata.obs[key]
V = V[:, component] if len(V.shape)>1 else V
ax = pl.figure(figsize=figsize).gca() if ax is None else ax
xmin = np.percentile(X, range_percs[0])
xmax = np.percentile(X, range_percs[1])
ev = np.linspace(xmin, xmax, n_eval)
# plot density per group
for i, cond in enumerate(np.sort(pd.unique(adata.obs[groupby]))):
mask = adata.obs[groupby] == cond
kernel = gaussian_kde(X[mask])
ax.plot(ev, kernel(ev), label=cond, linewidth=linewidth, color=adata.uns[groupby+'_colors'][i])
if velocity:
# arrow projections
edges = np.linspace(ev[0], ev[-1], n)
bins = [(edges[k]<X) & (X<edges[k+1]) for k in range(n-1)]
in_bin = bins[2]
xs = np.array([np.mean(X[mask & in_bin]) for in_bin in bins])
ys = np.array([np.mean(kernel(X[mask & in_bin])) for in_bin in bins])
vs = np.array([np.mean(V[mask & in_bin]) if y>ylim else 0 for y, in_bin in zip(ys, bins)])
vs = vs / np.max(np.abs(vs))
# pl.plot(X[mask & in_bin], kernel(X[mask & in_bin]), label=cond, linewidth=linewidth, color='red')
# pl.quiver(X[mask & in_bin], kernel(X[mask & in_bin]), V[mask & in_bin], 0)
ix = np.abs(vs) > 0
ax.quiver(xs[ix], ys[ix], vs[ix], 0 , zorder=100, scale_units='width', scale=scale, color=adata.uns[groupby+'_colors'][i])
# plot categorical annotations as rug
rug_y = ax.get_ylim()[1]/10
rug_keys = rug_keys if isinstance(rug_keys, list) else [rug_keys]
for i, rug in enumerate(rug_keys):
for j, cond in enumerate(np.sort(pd.unique(adata.obs[rug]))):
mask = (adata.obs[rug] == cond) & (X>xmin) & (X<xmax)
plot = ax.plot(X[mask], np.zeros(np.sum(mask)) - rug_y * (i+1), '|', color=adata.uns[rug+'_colors'][j], ms=10, alpha=rug_alpha)
ax.set_xticks([])
ax.set_xlabel(key + ' component '+str(component))
ax.set_ylabel(f'Cell density (KDE) by {groupby}')
ax.set_yticks(ax.get_yticks()[ax.get_yticks()>=0])
ax.axhline(y=0, c='k')
ax.legend()
if show:
pl.show()
else:
return
def diffusion_analysis_(adata, groupby, species='human', component=1, corr_cutoff=0.1, figsize=[10,8], range_percs=[3,97], velocity_mode=None, show=True):
"""Performs a diffusion analysis on adata for a specific diffusion component.
    velocity_mode may be None, 'on density', 'average' or 'single'.
    Parameters
    ----------
adata: An adata object.
Returns
-------
None
"""
ckey = 'DC'+str(component)
add_velocity_subplot = velocity_mode!=None and velocity_mode!='on density'
# set layout
fig = pl.figure(constrained_layout=True, figsize=figsize)
widths = [1, 1, 1]
n_rows = 3 + add_velocity_subplot
heights = [1] * n_rows
spec = fig.add_gridspec(ncols=3, nrows=n_rows, width_ratios=widths,
height_ratios=heights)
ax0 = fig.add_subplot(spec[0, :])
kde_trajectory(adata, key='diffmap', groupby=groupby, range_percs=range_percs, ax=ax0,
show=False, component=component,
velocity=velocity_mode=='on density'
)
ax0.set_xlabel('diffusion pseudotime')
ax0.set_ylabel('cell density')
def add_annotation(row, keys, fig, df, name):
n_top=8
ax_0 = fig.add_subplot(spec[row, 0])
ax_1 = fig.add_subplot(spec[row, 1], sharey=ax_0)
ax_2 = fig.add_subplot(spec[row, 2], sharey=ax_0)
ax_0.set_axis_off()
ax_2.set_axis_off()
# Arrows
ax_0.annotate('', xy=(.4, 1), xytext=(.6, 1),
arrowprops=dict(facecolor='black', shrink=0.05), rotation=90)
ax_2.annotate('', xy=(.6, 1), xytext=(.4, 1),
arrowprops=dict(facecolor='black', shrink=0.05), rotation=90)
# Texts
neg_df = df['spearman_r'][df['spearman_r']<-corr_cutoff].iloc[::-1][:n_top]
pos_df = df['spearman_r'][df['spearman_r']>corr_cutoff][:n_top]
for i, hallmark in enumerate(neg_df.index):
ax_0.text(0.5, .8 - i/len(hallmarks), hallmark.replace('HALLMARK_','').replace('_',' '), ha='center', va='center')
for i, hallmark in enumerate(pos_df.index):
ax_2.text(0.5, .8 - i/len(hallmarks), hallmark.replace('HALLMARK_','').replace('_',' '), ha='center', va='center')
# Barplot
ax_1.barh([.8- i/10 for i in range(len(neg_df))], neg_df.values, align='center', height=0.08, color='tab:blue')
ax_1.barh([.8- i/10 for i in range(len(pos_df))], pos_df.values, align='center', height=0.08, color='tab:red')
ax_1.spines['right'].set_visible(False)
ax_1.spines['left'].set_visible(False)
ax_1.spines['top'].set_visible(False)
ax_1.set_yticks([])
m = np.max(np.abs(df['spearman_r']))
ax_1.set_xlim([-m,m])
ax_1.set_xlabel(f'correlation between diffusion axis \n and {name} expression \n (spearman R)')
ax_1.set_ylim([0,1])
### Pathways
# aggregate hallmarks
dfs = []
hallmarks = ['HALLMARK_ANGIOGENESIS', 'HALLMARK_APOPTOSIS', 'HALLMARK_COAGULATION', 'HALLMARK_COMPLEMENT',
'HALLMARK_IL2_STAT5_SIGNALING', 'HALLMARK_INFLAMMATORY_RESPONSE',
'HALLMARK_INTERFERON_ALPHA_RESPONSE', 'HALLMARK_INTERFERON_GAMMA_RESPONSE', 'HALLMARK_PI3K_AKT_MTOR_SIGNALING',
'HALLMARK_TGF_BETA_SIGNALING', 'HALLMARK_XENOBIOTIC_METABOLISM']
if not all(np.isin(hallmarks, adata.obs.keys())): score_hallmarks(adata, species=species, subset=hallmarks)
df_hallmarks = lin_corr_adata(adata, adata.obsm['X_diffmap'][:, component], hallmarks)
df_hallmarks = df_hallmarks[~pd.isna(df_hallmarks.spearman_r)]
add_annotation(-2, hallmarks, fig, df_hallmarks, 'signature score')
### Genes
df_genes = lin_corr_adata(adata, adata.obsm['X_diffmap'][:, component], adata.var_names)
df_genes = df_genes[~pd.isna(df_genes.spearman_r)]
add_annotation(-1, hallmarks, fig, df_genes, 'gene')
### velocities
if add_velocity_subplot:
ax1 = fig.add_subplot(spec[1, :], sharex=ax0)
groups = list(adata.obs[groupby].cat.categories)
colors = adata.uns[f'{groupby}_colors']
x = adata.obsm['X_diffmap'][:, component]
v = adata.obsm['velocity_diffmap'][:, component]
mask0 = (x>np.percentile(x, range_percs[0])) & (x<np.percentile(x, range_percs[1]))
if velocity_mode=='single':
for i, group in enumerate(groups):
mask = (adata.obs[groupby] == group) & mask0
ax1.quiver(x[mask], np.random.uniform(1-i, -i, x.shape)[mask], v[mask], np.zeros_like(v)[mask], color=colors[i], scale=0.4, edgecolor='k', linewidth = .5)
ax1.set_ylabel(f'RNA velocity\nby {groupby}')
else:
from scipy.interpolate import interp1d
n_evals = 10
xint=np.linspace(np.percentile(x, range_percs[0]), np.percentile(x, range_percs[1]), n_evals)
for i, group in enumerate(groups):
mask = (adata.obs[groupby] == group) & mask0
f = interp1d(x[mask], v[mask])
x_int = xint[(xint >= np.min(x[mask])) & (xint <= np.max(x[mask]))]
v_int = f(x_int)
# Normalize
v_absmax = np.max(np.abs(v_int))
x_segment = (x_int[1] - x_int[0]) / (n_evals/5)
v_int = v_int * x_segment / v_absmax
ax1.quiver(x_int, i * np.ones_like(x_int), v_int, np.zeros_like(v_int),
headwidth=4, color=colors[i], edgecolor='k', linewidth = .5, angles='xy', scale_units='xy', scale=1)
ax1.set_ylim(-1, len(groups))
ax1.set_ylabel(f'Average RNA velocity\nby {groupby}')
ax1.set_yticks([])
# pl.suptitle('Neutrophil cell density on diffusion pseudotime')
if show: pl.show()
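# Usage sketch (hypothetical): assumes adata has 'X_diffmap' in adata.obsm, an
# obs column 'condition' with matching 'condition_colors' in adata.uns, and the
# hallmark signature file available for scoring.
#
#   diffusion_analysis_(adata, groupby='condition', component=1,
#                       range_percs=[3, 97], velocity_mode=None)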
def identify_barcode_overlap(df1, df2, key1, key2, reg1='[ACGT]+-', reg2='[ACGT]+-', kick=-1, plot=True):
# clear index
x1 = np.array([re.findall(reg1, txt)[0][:kick] for txt in df1.index])
x2 = np.array([re.findall(reg2, txt)[0][:kick] for txt in df2.index])
# count co-occurences of barcodes by key categories
c1 = | pd.unique(df1[key1]) | pandas.unique |
import numpy
import pandas as pd
import math as m
#Moving Average
def MA(df, n):
MA = pd.Series( df['Close'].rolling(window = n,center=False).mean(), name = 'MA_' + str(n), index=df.index )
# df = df.join(MA)
return MA
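# Usage sketch (hypothetical; the prices below are made-up illustration data):
#
#   prices = pd.DataFrame({'Close': [10.0, 10.5, 10.2, 10.8, 11.0, 11.3]})
#   prices = prices.join(MA(prices, 3))   # adds an 'MA_3' column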
#Exponential Moving Average
def EMA(df, n):
EMA = pd.Series(pd.ewma(df['Close'], span = n, min_periods = n - 1), name = 'EMA_' + str(n))
df = df.join(EMA)
return df
#Momentum
def MOM(df, n):
M = pd.Series(df['Close'].diff(n), name = 'Momentum_' + str(n))
df = df.join(M)
return df
#Rate of Change
def ROC(df, n):
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name = 'ROC_' + str(n))
df = df.join(ROC)
return df
#Average True Range
def ATR_2(df, n):
def TR(args):
print(args)
return 0
i = 0
TR_l = [0]
# while i < df.index[-1]:
df.rolling(2,).apply(TR)
for i in range(0, df.shape[0]-1):
# TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR = max( df.ix[i + 1, 'High'], df.ix[i, 'Close']) - min(df.ix[i + 1, 'Low'], df.ix[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l, index=df.index)
ATR = pd.Series(TR_s.rolling(window= n, center=False).mean(), name = 'ATR_' + str(n))
return ATR
#Average True Range
def ATR(df, n):
def TR(args):
print(args)
return 0
atr3 = pd.DataFrame( {'a' : abs( df['High'] - df['Low'] ), 'b' : abs( df['High'] - df['Close'].shift() ), 'c' : abs(df['Low']-df['Close'].shift() ) } )
return atr3.max(axis=1).rolling(window=n).mean()
#Bollinger Bands
def BBANDS(df, n):
MA = pd.Series(pd.rolling_mean(df['Close'], n))
MSD = pd.Series(pd.rolling_std(df['Close'], n))
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name = 'BollingerB_' + str(n))
df = df.join(B1)
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name = 'Bollinger%b_' + str(n))
df = df.join(B2)
return df
#Pivot Points, Supports and Resistances
def PPSR(df):
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
psr = {'PP':PP, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3}
PSR = pd.DataFrame(psr)
df = df.join(PSR)
return df
#Stochastic oscillator %K
def STOK(df):
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name = 'SO%k')
df = df.join(SOk)
return df
#Stochastic oscillator %D
def STO(df, n):
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name = 'SO%k')
SOd = pd.Series(pd.ewma(SOk, span = n, min_periods = n - 1), name = 'SO%d_' + str(n))
df = df.join(SOd)
return df
#Trix
def TRIX(df, n):
EX1 = pd.ewma(df['Close'], span = n, min_periods = n - 1)
EX2 = pd.ewma(EX1, span = n, min_periods = n - 1)
EX3 = pd.ewma(EX2, span = n, min_periods = n - 1)
i = 0
ROC_l = [0]
while i + 1 <= df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
Trix = pd.Series(ROC_l, name = 'Trix_' + str(n))
df = df.join(Trix)
return df
#Average Directional Movement Index
def ADX(df, n, n_ADX):
i = 0
UpI = []
DoI = []
while i + 1 <= df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else: UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else: DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(pd.ewma(TR_s, span = n, min_periods = n))
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span = n, min_periods = n - 1) / ATR)
NegDI = pd.Series(pd.ewma(DoI, span = n, min_periods = n - 1) / ATR)
ADX = pd.Series(pd.ewma(abs(PosDI - NegDI) / (PosDI + NegDI), span = n_ADX, min_periods = n_ADX - 1), name = 'ADX_' + str(n) + '_' + str(n_ADX))
df = df.join(ADX)
return df
#MACD, MACD Signal and MACD difference
def MACD(df, n_fast, n_slow):
EMAfast = pd.Series(pd.ewma(df['Close'], span = n_fast, min_periods = n_slow - 1))
EMAslow = pd.Series(pd.ewma(df['Close'], span = n_slow, min_periods = n_slow - 1))
MACD = pd.Series(EMAfast - EMAslow, name = 'MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(pd.ewma(MACD, span = 9, min_periods = 8), name = 'MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name = 'MACDdiff_' + str(n_fast) + '_' + str(n_slow))
df = df.join(MACD)
df = df.join(MACDsign)
df = df.join(MACDdiff)
return df
#Mass Index
def MassI(df):
Range = df['High'] - df['Low']
EX1 = pd.ewma(Range, span = 9, min_periods = 8)
EX2 = pd.ewma(EX1, span = 9, min_periods = 8)
Mass = EX1 / EX2
MassI = pd.Series(pd.rolling_sum(Mass, 25), name = 'Mass Index')
df = df.join(MassI)
return df
#Vortex Indicator: http://www.vortexindicator.com/VFX_VORTEX.PDF
def Vortex(df, n):
i = 0
TR = [0]
while i < df.index[-1]:
Range = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < df.index[-1]:
Range = abs(df.get_value(i + 1, 'High') - df.get_value(i, 'Low')) - abs(df.get_value(i + 1, 'Low') - df.get_value(i, 'High'))
VM.append(Range)
i = i + 1
VI = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name = 'Vortex_' + str(n))
df = df.join(VI)
return df
#KST Oscillator
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
KST = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name = 'KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
df = df.join(KST)
return df
#Relative Strength Index
def RSI(df, n):
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else: UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else: DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span = n, min_periods = n - 1))
NegDI = pd.Series(pd.ewma(DoI, span = n, min_periods = n - 1))
RSI = pd.Series(PosDI / (PosDI + NegDI), name = 'RSI_' + str(n))
df = df.join(RSI)
return df
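# Editor's note: the RSI above is derived from High/Low directional moves (the
# same UpMove/DoMove logic as ADX) and is scaled 0-1. For comparison, a minimal
# sketch of a conventional close-to-close, EWMA-smoothed RSI on the same 0-1
# scale (illustrative addition, not part of the original script):
def RSI_close_based(df, n):
    delta = df['Close'].diff()
    gain = delta.clip(lower = 0).ewm(span = n, min_periods = n - 1).mean()
    loss = (-delta.clip(upper = 0)).ewm(span = n, min_periods = n - 1).mean()
    rsi = gain / (gain + loss)
    return df.join(rsi.rename('RSI_close_' + str(n)))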
#True Strength Index
def TSI(df, r, s):
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(pd.ewma(M, span = r, min_periods = r - 1))
aEMA1 = pd.Series(pd.ewma(aM, span = r, min_periods = r - 1))
EMA2 = pd.Series(pd.ewma(EMA1, span = s, min_periods = s - 1))
aEMA2 = pd.Series(pd.ewma(aEMA1, span = s, min_periods = s - 1))
TSI = pd.Series(EMA2 / aEMA2, name = 'TSI_' + str(r) + '_' + str(s))
df = df.join(TSI)
return df
#Accumulation/Distribution
def ACCDIST(df, n):
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
M = ad.diff(n - 1)
N = ad.shift(n - 1)
ROC = M / N
AD = pd.Series(ROC, name = 'Acc/Dist_ROC_' + str(n))
df = df.join(AD)
return df
#Chaikin Oscillator
def Chaikin(df):
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
Chaikin = pd.Series(pd.ewma(ad, span = 3, min_periods = 2) - pd.ewma(ad, span = 10, min_periods = 9), name = 'Chaikin')
df = df.join(Chaikin)
return df
#Money Flow Index and Ratio
def MFI(df, n):
PP = (df['High'] + df['Low'] + df['Close']) / 3
i = 0
PosMF = [0]
while i < df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.get_value(i + 1, 'Volume'))
else:
PosMF.append(0)
i = i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['Volume']
MFR = pd.Series(PosMF / TotMF)
MFI = pd.Series(pd.rolling_mean(MFR, n), name = 'MFI_' + str(n))
df = df.join(MFI)
return df
#On-balance Volume
def OBV(df, n):
i = 0
OBV = [0]
while i < df.index[-1]:
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') > 0:
OBV.append(df.get_value(i + 1, 'Volume'))
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') == 0:
OBV.append(0)
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') < 0:
OBV.append(-df.get_value(i + 1, 'Volume'))
i = i + 1
OBV = pd.Series(OBV)
OBV_ma = pd.Series(pd.rolling_mean(OBV, n), name = 'OBV_' + str(n))
df = df.join(OBV_ma)
return df
#Force Index
def FORCE(df, n):
F = pd.Series(df['Close'].diff(n) * df['Volume'].diff(n), name = 'Force_' + str(n))
df = df.join(F)
return df
#Ease of Movement
def EOM(df, n):
EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
Eom_ma = pd.Series(pd.rolling_mean(EoM, n), name = 'EoM_' + str(n))
df = df.join(Eom_ma)
return df
#Commodity Channel Index
def CCI(df, n):
PP = (df['High'] + df['Low'] + df['Close']) / 3
CCI = pd.Series((PP - pd.rolling_mean(PP, n)) / pd.rolling_std(PP, n), name = 'CCI_' + str(n))
df = df.join(CCI)
return df
#Coppock Curve
def COPP(df, n):
M = df['Close'].diff(int(n * 11 / 10) - 1)
N = df['Close'].shift(int(n * 11 / 10) - 1)
ROC1 = M / N
M = df['Close'].diff(int(n * 14 / 10) - 1)
N = df['Close'].shift(int(n * 14 / 10) - 1)
ROC2 = M / N
Copp = pd.Series(pd.ewma(ROC1 + ROC2, span = n, min_periods = n), name = 'Copp_' + str(n))
df = df.join(Copp)
return df
#Keltner Channel
def KELCH(df, n):
KelChM = pd.Series(pd.rolling_mean((df['High'] + df['Low'] + df['Close']) / 3, n), name = 'KelChM_' + str(n))
KelChU = pd.Series( | pd.rolling_mean((4 * df['High'] - 2 * df['Low'] + df['Close']) / 3, n) | pandas.rolling_mean |
from datetime import date
import unittest
import dolphindb as ddb
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
import dolphindb.settings as keys
import statsmodels.api as sm
def createdata():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
    script = '''
time1=10:01:01 join 10:01:03 join 10:01:05 join 10:01:05
symbol1=take(`X`Z,4)
price1=3 3.3 3.2 3.1
size1=100 200 50 10
Trade=table(time1 as time,symbol1 as symbol,price1 as price,size1 as size)
time2=10:01:01 join 10:01:02 join 10:01:02 join 10:01:03
symbol2=take(`X`Z,4)
ask=90 150 100 52
bid=70 200 200 68
Quote=table(time2 as time,symbol2 as symbol,ask as ask,bid as bid)
share Trade as shareTrade
share Quote as shareQuote
login("admin", "123456")
if(existsDatabase("dfs://testmergepart"))
dropDatabase("dfs://testmergepart")
db = database("dfs://testmergepart", VALUE, "X" "Z")
pt1 = db.createPartitionedTable(Trade,`pt1,`symbol).append!(Trade)
pt2 = db.createPartitionedTable(Quote,`pt2,`symbol).append!(Quote)
'''
    s.run(script)
s.close()
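# Illustrative helper (not part of the original tests): the shared in-memory
# tables created above can be pulled back as pandas DataFrames with session.run,
# assuming the same host, port and credentials.
def peek_shared_tables():
    s = ddb.session()
    s.connect(HOST, PORT, "admin", "123456")
    trade = s.run("select * from shareTrade")   # returned as a pandas DataFrame
    quote = s.run("select * from shareQuote")
    s.close()
    return trade, quote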
class TestTable(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
createdata()
cls.pd_left = pd.DataFrame({'time': pd.to_datetime(
['1970-01-01T10:01:01', '1970-01-01T10:01:03', '1970-01-01T10:01:05', '1970-01-01T10:01:05']),
'symbol': ["X", "Z", "X", "Z"],
'price': [3, 3.3, 3.2, 3.1],
'size': [100, 200, 50, 10]})
cls.pdf_right = pd.DataFrame({'time': pd.to_datetime(
['1970-01-01T10:01:01', '1970-01-01T10:01:02', '1970-01-01T10:01:02', '1970-01-01T10:01:03']),
'symbol': ["X", "Z", "X", "Z"],
'ask': [90, 150, 100, 52],
'bid': [70, 200, 200, 68]})
@classmethod
def tearDownClass(cls):
script='''
undef((exec name from objs(true) where shared=1),SHARED)
if(existsDatabase('dfs://testmergepart')){
dropDatabase('dfs://testmergepart')
}
'''
cls.s.run(script)
def test_create_table_by_python_dictionary(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
tmp = self.s.table(data=data, tableAliasName="tmp")
re = self.s.run("tmp")
df = pd.DataFrame(data)
assert_frame_equal(tmp.toDF(), df)
assert_frame_equal(re, df)
def test_create_table_by_pandas_dataframe(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
df = pd.DataFrame(data)
tmp = self.s.table(data=df, tableAliasName="tmp")
re = self.s.run("tmp")
assert_frame_equal(tmp.toDF(), df)
assert_frame_equal(re, df)
def test_table_toDF(self):
tmp = self.s.loadText(DATA_DIR + "/USPrices_FIRST.csv")
df = self.s.run("select * from loadText('{data}')".format(data=DATA_DIR + "/USPrices_FIRST.csv"))
self.assertEqual(len(tmp.toDF()), len(df))
assert_frame_equal(tmp.toDF(), df)
tbName = tmp.tableName()
self.s.run("undef", tbName)
def test_table_showSQL(self):
tmp = self.s.loadText(DATA_DIR + "/USPrices_FIRST.csv")
sql = tmp.showSQL()
tbName = tmp.tableName()
self.assertEqual(sql, 'select PERMNO,date,SHRCD,TICKER,TRDSTAT,HEXCD,CUSIP,DLSTCD,DLPRC,'
'DLRET,BIDLO,ASKHI,PRC,VOL,RET,BID,ASK,SHROUT,CFACPR,CFACSHR,OPENPRC '
'from {tbName}'.format(tbName=tbName))
self.s.run("undef", tbName)
def test_table_sql_select_where(self):
data = DATA_DIR + "/USPrices_FIRST.csv"
tmp = self.s.loadText(data)
re = tmp.select(['PERMNO', 'date']).where(tmp.date > '2010.01.01')
df = self.s.run("select PERMNO,date from loadText('{data}') where date>2010.01.01".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
re = tmp.select(['PERMNO', 'date']).where(tmp.date > '2010.01.01').sort(['date desc'])
df = self.s.run(
"select PERMNO,date from loadText('{data}') where date>2010.01.01 order by date desc".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
re = tmp[tmp.date > '2010.01.01']
df = self.s.run("select * from loadText('{data}') where date>2010.01.01".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
tbName = tmp.tableName()
self.s.run("undef", tbName)
def test_table_sql_groupby(self):
data = DATA_DIR + "/USPrices_FIRST.csv"
tmp = self.s.loadText(data)
origin = tmp.toDF()
re = tmp.groupby('PERMNO').agg({'bid': ['sum']}).toDF()
df = self.s.run("select sum(bid) from loadText('{data}') group by PERMNO".format(data=data))
self.assertEqual((re['PERMNO'] == 10001).all(), True)
self.assertAlmostEqual(re['sum_bid'][0], 59684.9775)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO', 'date']).agg({'bid': ['sum']}).toDF()
df = self.s.run("select sum(bid) from loadText('{data}') group by PERMNO,date".format(data=data))
self.assertEqual(re.shape[1], 3)
self.assertEqual(len(re), 6047)
self.assertEqual((origin['BID'] == re['sum_bid']).all(), True)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO', 'date']).agg({'bid': ['sum'], 'ask': ['sum']}).toDF()
df = self.s.run("select sum(bid),sum(ask) from loadText('{data}') group by PERMNO,date".format(data=data))
self.assertEqual(re.shape[1], 4)
self.assertEqual(len(re), 6047)
self.assertEqual((origin['BID'] == re['sum_bid']).all(), True)
self.assertEqual((origin['ASK'] == re['sum_ask']).all(), True)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO']).agg2([ddb.wsum, ddb.wavg], [('bid', 'ask')]).toDF()
df = self.s.run("select wsum(bid,ask),wavg(bid,ask) from loadText('{data}') group by PERMNO".format(data=data))
| assert_frame_equal(re, df) | pandas.testing.assert_frame_equal |
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see GH#9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series(["1 days"])
tm.assert_series_equal(ser, expected)
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize(
"data, dtype",
[
(["x", "y", "z"], "string"),
pytest.param(
["x", "y", "z"],
"arrow_string",
marks= | td.skip_if_no("pyarrow", min_version="1.0.0") | pandas.util._test_decorators.skip_if_no |
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
from auxiliary.callable_algorithms import (
our_simple_nelder_mead_method,
our_simple_newton_based_optimization,
global_nelder_mead_method,
global_newton_based_optimization,
global_optimization_BOBYQA,
)
from auxiliary.functions import rastrigin, griewank, rosenbrock, levi_no_13
# In this file we implement the benchmark routine that we use to benchmark the nelder-mead-method
# we implemented and compare its performace to algorithms from the established libary NLOPT
# This file includes to benchmarks
# the benchmark 'benchmark_simple_optimization is used for algorithms that start from a signle point
# the benchmark 'benchmark_smart_optimization' is used for algorithms that get a domain and find their own starting points
# the function with the prefix 'run_' simply execute the corresponding benchmark for multiple computational budgets and safe that data in a dataframe
# This function creates starting points fot the 'benchmark_simple_optimization'
def get_starting_points(n, domain_center, domain_radius, dimension):
"""Return a dataframe which contains the results of the optimization algorithm applied to the
test-funcion f with n different starting point.
Args:
n: the number of points which we want to the function to return
domain_center: the center of the domain in which the starting points should lie
domain_radius: the radius of the domain
dimension: the dimension of the space from which we want the starting point
Returns:
        out: an array of n starting points within domain_radius of domain_center
"""
# get random numbers
A = np.random.rand(n, dimension) * domain_radius
# ensure they are in the domain
A = [domain_center + x for x in A]
return A
# this function computes the measures of interests after the benchmarks are run
def average_time_success(df):
"""Return a dataframe which contains the results of the optimization algorithm applied to the
test-funcion f with n different starting point.
Args:
        df: non-empty dataframe with the columns: algorithm, test_function, computational_budget, sample_size, success, time, function_evaluations
Returns:
        out: dataframe with a single row containing the success rate, average time and average number of function evaluations
"""
# create new dataframe
df_new = pd.DataFrame([])
    # save the name of the algorithm
df_new["algorithm"] = pd.Series([df.iloc[0]["algorithm"]])
    # save the name of the test-function
df_new["test_function"] = pd.Series([df.iloc[0]["test_function"]])
    # save the computational_budget used
df_new["computational_budget"] = pd.Series([df.iloc[0]["computational_budget"]])
    # save the sample size
df_new["sample_size"] = pd.Series([df.iloc[0]["sample_size"]])
    # compute and save the success rate
df_new["success_rate"] = pd.Series([df["success"].mean()])
    # compute and save the average time per optimization
df_new["average_time"] = pd.Series([df["time"].mean()])
    # compute and save the average number of function evaluations taken
df_new["average_function_evaluations"] = pd.Series(
[df["function_evaluations"].mean()]
)
return df_new
# This function performs the benchmark for algorithms that take the domain as input and find their own starting points
def benchmark_smart_optimization(
f,
algorithm,
computational_budget,
domain_center,
domain_radius,
number_of_optimizations,
dimension,
optimum,
algo_name="unknown",
t_func_name="unknown",
x_tolerance=1e-6,
y_tolerance=1e-6,
sample_size=50,
number_of_candidates=5,
):
"""Return a dataframe which contains the results of the optimization algorithm applied number_of_optimizations many times to the
test-funcion f.
Args:
f: a function from \R^n to \R whose optimum we want to find
algorithm: the optimization algorithm we use for the optimization
computation_budget: the number of maximal function evaluations
domain_center: a point in \R^n
domain_radius: a positive real number
number_of_optimizations: a natural number
dimension: the dimension of the problem
optimum: the correct optimum of f
algo_name: the name of the algorithm as a string
t_func_name: the name of the test function
x_tolerance: a positive real number
y_tolerance: a positive real number
sample_size: The optimization-intern number of points that get considered as starting points for the local search
number_of_candidates: The optimization-intern number of starting points from which we start the local search
Returns:
        out: A dataframe that contains the results and the number of function evaluations for
            each attempt to optimize f
"""
# create dataframe
df = pd.DataFrame([], columns=["computed_result", "function_evaluations", "time"])
# run the optimizations
for i in range(number_of_optimizations):
# start timer to take the time
start = time.time()
a = algorithm(
f,
computational_budget,
domain_center,
domain_radius,
dimension,
sample_size,
number_of_candidates,
x_tolerance,
y_tolerance,
)
# end timer
end = time.time()
a = list(a)
# compute time taken and add as new collumn
a = pd.Series(
a + [end - start],
index=df.columns,
)
df = df.append(a, ignore_index=True)
# compute further measures we might want to analyse later
df["correct_result"] = pd.Series([optimum] * number_of_optimizations)
df["algorithm"] = pd.Series([algo_name] * number_of_optimizations)
df["test_function"] = pd.Series([t_func_name] * number_of_optimizations)
df["computational_budget"] = pd.Series(
[computational_budget] * number_of_optimizations
)
df["sample_size"] = pd.Series([number_of_optimizations] * number_of_optimizations)
df["success"] = df.apply(
lambda row: np.allclose(row.correct_result, row.computed_result), axis=1
)
df["success"] = df.apply(lambda row: row.success * 1, axis=1)
return df
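# Example call (illustrative only; the budget, domain and tolerance values below
# are placeholders, not settings used in the project):
#   benchmark_smart_optimization(
#       rastrigin, global_nelder_mead_method, computational_budget=1000,
#       domain_center=np.zeros(2), domain_radius=5, number_of_optimizations=10,
#       dimension=2, optimum=np.zeros(2), algo_name="global_nelder_mead",
#       t_func_name="rastrigin",
#   )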
# benchmark an algorithm that starts from a single point
def benchmark_simple_optimization(
f,
optimum,
algo,
computational_budget,
n,
domain_center,
domain_radius,
dimension,
algo_name="unknown",
t_func_name="unknown",
x_tolerance=1e-6,
y_tolerance=1e-6,
):
"""Return a dataframe which contains the results of the optimization algorithm applied to the
test-funcion f with n different starting point.
Args:
f: a function from \R^n to \R whose optimum we want to find
optimum: The correct optimum of f
algo: The optimization algorithm we use for the optimization
computation_budget: the number of maximal function evaluations
x_tolerance: a positive real number
y_tolerance: a positive real number
n: The number or starting points from which we start the optimization
domain_center: The center of the domain in which we want to start the optimization
domain_radius: The radius of the domain
dimension: The dimension of the problem f
Returns:
        out: A dataframe that contains the results and the number of function evaluations for
            each attempt to optimize f
"""
# get starting points
starting_points = get_starting_points(n, domain_center, domain_radius, dimension)
# create dataframe
df = | pd.DataFrame([], columns=["computed_result", "function_evaluations", "time"]) | pandas.DataFrame |
from snorkel.lf_helpers import (
get_left_tokens,
get_right_tokens,
get_between_tokens,
get_tagged_text,
get_text_between,
is_inverted,
rule_regex_search_tagged_text,
rule_regex_search_btw_AB,
rule_regex_search_btw_BA,
rule_regex_search_before_A,
rule_regex_search_before_B,
)
from collections import OrderedDict
import numpy as np
import random
import re
import pathlib
import pandas as pd
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
random.seed(100)
stop_word_list = stopwords.words('english')
"""
Debugging to understand how LFs work
"""
def LF_DEBUG(c):
"""
This label function is for debugging purposes. Feel free to ignore.
keyword arguments:
c - The candidate object to be labeled
"""
print(c)
print()
print("Left Tokens")
print(list(get_left_tokens(c[0], window=5)))
print()
print("Right Tokens")
print(list(get_right_tokens(c[0])))
print()
print("Between Tokens")
print(list(get_between_tokens(c)))
print()
print("Tagged Text")
print(get_tagged_text(c))
print(re.search(r'{{B}} .* is a .* {{A}}', get_tagged_text(c)))
print()
print("Get between Text")
print(get_text_between(c))
print(len(get_text_between(c)))
print()
print("Parent Text")
print(c.get_parent())
print()
return 0
# Helper function for label functions
def ltp(tokens):
return '(' + '|'.join(tokens) + ')'
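# e.g. ltp(["treats", "cures"]) returns "(treats|cures)", which can be embedded
# in the regex patterns used by the label functions below.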
"""
DISTANT SUPERVISION
"""
path = pathlib.Path(__file__).joinpath('../../../datafile/results/compound_treats_disease.tsv.xz').resolve()
pair_df = pd.read_table(path, dtype={"sources": str})
knowledge_base = set()
for row in pair_df.itertuples():
if not row.sources or | pd.isnull(row.sources) | pandas.isnull |
import argparse
import os
from pathlib import Path
from typing import Tuple
import pandas as pd
import category_encoders as ce
import yaml
import sys
from dotenv import load_dotenv
# from src.table import sentence_feature
load_dotenv() # noqa
sys.path.append(f"{os.getenv('PROJECT_ROOT')}src/") # noqa
ROOT = Path(os.getenv('PROJECT_ROOT'))
def count_encoding(df: pd.DataFrame, columns: list = None) -> pd.DataFrame:
"""
    Count of each category value in the given columns.
Args:
df:
columns:
Returns:
"""
df_ = df.copy()
count_encoder = ce.CountEncoder(cols=columns)
df_result = count_encoder.fit_transform(df_[columns])
df_result = df_result.add_suffix("_COUNT")
df_ = pd.concat([df, df_result], axis=1)
return df_
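# Illustrative example (hypothetical data, not from the project):
#   df = pd.DataFrame({"LOAN_ID": [1, 2, 3], "SECTOR_NAME": ["Food", "Food", "Arts"]})
#   count_encoding(df, ["SECTOR_NAME"])
# adds a SECTOR_NAME_COUNT column with the values [2, 2, 1].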
def make_feat_join_minor_cate_activity_name(df):
df_ = df.copy()
df_["activity_name_cnt"] = df_.groupby("ACTIVITY_NAME")["LOAN_ID"].transform("count")
df_result = df_[["ACTIVITY_NAME", "activity_name_cnt"]].apply(
lambda x: x["ACTIVITY_NAME"] if x["activity_name_cnt"] > 500 else "Others", axis=1
)
return df_result
def make_feat_join_minor_cate_country_code(df):
df_ = df.copy()
df_["country_code_cnt"] = df_.groupby("COUNTRY_CODE")["LOAN_ID"].transform("count")
df_result = df_[["COUNTRY_CODE", "country_code_cnt"]].apply(
lambda x: x["COUNTRY_CODE"] if x["country_code_cnt"] > 500 else "Others", axis=1
)
return df_result
def make_feat_join_minor_cate_currency(df):
df_ = df.copy()
df_["currency_cnt"] = df_.groupby("CURRENCY")["LOAN_ID"].transform("count")
df_result = df_[["CURRENCY", "currency_cnt"]].apply(
lambda x: x["CURRENCY"] if x["currency_cnt"] > 500 else "Others", axis=1
)
return df_result
def make_feat_is_loan_amount_outlier(df):
df_ = df.copy()
df_result = df_["LOAN_AMOUNT"].map(lambda col: 1 if col >= 2000 else 0)
return df_result
def make_feat_concat_country_code_sector_name(df):
df_ = df.copy()
df_result = df_[["COUNTRY_CODE", "SECTOR_NAME"]].apply(lambda x: f"{x['COUNTRY_CODE']}_{x['SECTOR_NAME']}", axis=1)
return df_result
def make_loan_use_first_word_feature(df: pd.DataFrame, skip_list: list = None) -> pd.Series:
"""
df["LOAN_USE"]の先頭の単語を特徴量として返す。
skip_list中の単語は飛ばして次の単語を対象とする。
Args:
df:
skip_list:
Returns:
"""
if skip_list is None:
skip_list = ["to", "To", "a", ""]
df_ = df.copy()
def extract_loan_use_first_word_skip(loan_use_str):
loan_use_str_split = loan_use_str.split(" ")
for word in loan_use_str_split:
if word not in skip_list:
return word
return ""
return df_["LOAN_USE"].apply(extract_loan_use_first_word_skip)
def make_feat_join_minor_loan_use_first_word(df):
"""
    Replace LOAN_USE_first_word categories that occur 100 times or fewer with "Others".
Args:
df:
Returns:
"""
df_ = df.copy()
df_["cnt"] = df_.groupby("LOAN_USE_first_word")["LOAN_ID"].transform("count")
df_result = df_[["LOAN_USE_first_word", "cnt"]].apply(
lambda x: x["LOAN_USE_first_word"] if x["cnt"] > 100 else "Others", axis=1
)
return df_result
def make_feat_join_minor_town_name(df):
df_ = df.copy()
df_["cnt"] = df_.groupby("TOWN_NAME")["LOAN_ID"].transform("count")
df_result = df_[["TOWN_NAME", "cnt"]].apply(
lambda x: x["TOWN_NAME"] if x["cnt"] > 500 else "Others", axis=1
)
return df_result
def agg_category_group(df: pd.DataFrame, category: str, target: str, agg_list: list = None) -> pd.DataFrame:
"""
    Add aggregate statistics of the target column, grouped by the category column, to df.
Args:
df:
category:
target:
        agg_list: list of aggregation statistics to apply
Returns:
"""
df_ = df.copy()
if agg_list is None:
agg_list = ["mean", "std", "min", "max"]
for agg_type in agg_list:
group_agg = df_[[category, target]].groupby(category).agg(agg_type).reset_index().rename(
{target: f"{category}_{agg_type}_{target}"}, axis=1
)
df_ = df_.merge(group_agg, how='left', on=[category])
return df_
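# Illustrative call (not part of the original): agg_category_group(df, "COUNTRY_CODE", "LOAN_AMOUNT")
# adds the columns COUNTRY_CODE_mean_LOAN_AMOUNT, COUNTRY_CODE_std_LOAN_AMOUNT,
# COUNTRY_CODE_min_LOAN_AMOUNT and COUNTRY_CODE_max_LOAN_AMOUNT, one value per row.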
def make_features(train_df: pd.DataFrame, test_df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
    Feature generation.
Args:
train_df:
test_df:
Returns:
        train and test dataframes after preprocessing
"""
train_df_ = train_df.copy()
test_df_ = test_df.copy()
df_ = | pd.concat([train_df_, test_df_], axis=0) | pandas.concat |
import numpy as np
import pandas as pd
import plotly.express as px
import os
from pet.flags import save_yaml, load_yaml
import dataclasses
from joblib import load
from glob import glob
from shutil import rmtree, copytree, copyfile
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
from preparation.cleaner import Preprocessing_NLP
from preparation.prepare_data import Prepare
import argparse
import os
import json
from typing import Tuple
import torch
from pet.tasks import ClassProcessor, PROCESSORS, load_examples, UNLABELED_SET, TRAIN_SET, DEV_SET, TEST_SET, METRICS, DEFAULT_METRICS
from pet.utils import eq_div
from pet.wrapper import WRAPPER_TYPES, MODEL_CLASSES, SEQUENCE_CLASSIFIER_WRAPPER, WrapperConfig
import pet
import logg
from pet.flags import save_yaml
import dataclasses
logger = logg.get_logger('root')
# logging.getLogger().setLevel(verbosity_to_loglevel(2))
# create requirement.txt file : !pipreqs ./ --ignore .venv --encoding=utf8 --force
def load_pet_configs(args) -> Tuple[WrapperConfig, pet.TrainConfig, pet.EvalConfig]:
"""
Load the model, training and evaluation configs for PET from the given command line arguments.
"""
model_cfg = WrapperConfig(model_type=args.model_type, model_name_or_path=args.model_name_or_path,
wrapper_type=args.wrapper_type, task_name=args.task_name, label_list=args.label_list,
max_seq_length=args.pet_max_seq_length, verbalizer_file=args.verbalizer_file,
cache_dir=args.cache_dir, verbalizer=args.verbalizer, pattern=args.pattern)
train_cfg = pet.TrainConfig(device=args.device, per_gpu_train_batch_size=args.pet_per_gpu_train_batch_size,
per_gpu_unlabeled_batch_size=args.pet_per_gpu_unlabeled_batch_size, n_gpu=args.n_gpu,
num_train_epochs=args.pet_num_train_epochs, max_steps=args.pet_max_steps,
gradient_accumulation_steps=args.pet_gradient_accumulation_steps,
weight_decay=args.weight_decay, learning_rate=args.learning_rate,
adam_epsilon=args.adam_epsilon, warmup_steps=args.warmup_steps,
max_grad_norm=args.max_grad_norm, lm_training=args.lm_training, alpha=args.alpha)
eval_cfg = pet.EvalConfig(device=args.device, n_gpu=args.n_gpu, metrics=args.metrics,
per_gpu_eval_batch_size=args.pet_per_gpu_eval_batch_size,
decoding_strategy=args.decoding_strategy, priming=args.priming)
return model_cfg, train_cfg, eval_cfg
def load_sequence_classifier_configs(args) -> Tuple[WrapperConfig, pet.TrainConfig, pet.EvalConfig]:
"""
Load the model, training and evaluation configs for a regular sequence classifier from the given command line
arguments. This classifier can either be used as a standalone model or as the final classifier for PET/iPET.
"""
model_cfg = WrapperConfig(model_type=args.model_type, model_name_or_path=args.model_name_or_path,
wrapper_type=SEQUENCE_CLASSIFIER_WRAPPER, task_name=args.task_name,
label_list=args.label_list, max_seq_length=args.sc_max_seq_length,
verbalizer_file=args.verbalizer_file, cache_dir=args.cache_dir,
verbalizer=args.verbalizer, pattern=args.pattern)
train_cfg = pet.TrainConfig(device=args.device, per_gpu_train_batch_size=args.sc_per_gpu_train_batch_size,
per_gpu_unlabeled_batch_size=args.sc_per_gpu_unlabeled_batch_size, n_gpu=args.n_gpu,
num_train_epochs=args.sc_num_train_epochs, max_steps=args.sc_max_steps,
temperature=args.temperature,
gradient_accumulation_steps=args.sc_gradient_accumulation_steps,
weight_decay=args.weight_decay, learning_rate=args.learning_rate,
adam_epsilon=args.adam_epsilon, warmup_steps=args.warmup_steps,
max_grad_norm=args.max_grad_norm, use_logits=args.method != 'sequence_classifier')
eval_cfg = pet.EvalConfig(device=args.device, n_gpu=args.n_gpu, metrics=args.metrics,
per_gpu_eval_batch_size=args.sc_per_gpu_eval_batch_size)
return model_cfg, train_cfg, eval_cfg
def load_ipet_config(args) -> pet.IPetConfig:
"""
Load the iPET config from the given command line arguments.
"""
ipet_cfg = pet.IPetConfig(generations=args.ipet_generations, logits_percentage=args.ipet_logits_percentage,
scale_factor=args.ipet_scale_factor, n_most_likely=args.ipet_n_most_likely)
return ipet_cfg
def add_fix_params(args):
args.task_name = "Verbalizer"
args.wrapper_type = "mlm"
args.lm_training = True
args.alpha = 0.9999
args.temperature = 2
args.verbalizer_file = None
args.decoding_strategy = "default"
args.pet_gradient_accumulation_steps = 1
args.pet_max_steps = -1
args.sc_repetitions = 1
args.sc_gradient_accumulation_steps = 1
args.split_examples_evenly = False
args.cache_dir = ""
args.weight_decay = 0.01
args.adam_epsilon = 1e-8
args.max_grad_norm = 1.0
args.warmup_steps = 0
args.logging_steps = 50
args.no_cuda = False
args.overwrite_output_dir = False
args.priming = False
return args
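# Typical driver flow for the Pet class defined below (illustrative only; the
# concrete flag values come from the project's Flags object):
#   pet_task = Pet(flags_parameters)
#   pet_task.data_preprocessing()                 # read, clean and split the data
#   pet_task.train()                              # PET training with (cross-)validation
#   leaderboard = pet_task.get_leaderboard(dataset='val')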
class Pet:
"""Class for compile full pipeline of Pet task.
Pet steps:
- Preprocessing data :class:Preprocessing_NLP
- Prepare data :class:Prepare
- Compute Pet model
- Returns prediction/metric results on validation data.
- Returns prediction/metric results on test data.
"""
def __init__(self, flags_parameters):
"""
Args:
flags_parameters : Instance of Flags class object
From flags_parameters:
column_text (str) : name of the column with texts (only one column)
frac_trainset (float) : (]0;1]) fraction of dataset to use for train set
seed (int) : seed used for train/test split, cross-validation split and fold choice
outdir (str) : path of output logs
"""
self.flags_parameters = flags_parameters
self.column_text = flags_parameters.column_text
self.frac_trainset = flags_parameters.frac_trainset
self.seed = flags_parameters.seed
self.outdir = self.flags_parameters.outdir
self.flags_parameters = add_fix_params(self.flags_parameters)
logger.info("Parameters: {}".format(self.flags_parameters))
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
# Setup CUDA, GPU & distributed training
self.flags_parameters.device = "cuda" if torch.cuda.is_available() and not self.flags_parameters.no_cuda else "cpu"
self.flags_parameters.n_gpu = torch.cuda.device_count()
flags_dict = dataclasses.asdict(self.flags_parameters)
save_yaml(os.path.join(self.outdir, "flags.yaml"), flags_dict)
self.pre = None
self.info_scores = {self.flags_parameters.model_type: {}} # variable to store all information scores
def data_preprocessing(self, data=None):
""" Apply :class:Preprocessing_NLP from preprocessing_nlp.py
and :class:Prepare from prepare_data.py
Args :
data (Dataframe)
"""
# Read data
if data is None:
logger.info("\nRead data...")
data = pd.read_csv(self.flags_parameters.path_data)
# Preprocessing
logger.info("\nBegin preprocessing of {} data :".format(len(data)))
self.pre = Preprocessing_NLP(data, self.flags_parameters)
self.data = self.pre.transform(data)
if self.flags_parameters.path_data_validation == '' or self.flags_parameters.path_data_validation == 'empty' or self.flags_parameters.path_data_validation is None:
self.dataset_val = None
# Validation
# use a loaded validation dataset :
else:
self.dataset_val = pd.read_csv(self.flags_parameters.path_data_validation)
self.dataset_val = self.pre.transform(self.dataset_val)
self.prepare = Prepare(self.flags_parameters)
self.column_text, self.column_text_b, self.X_train, self.Y_train, self.X_val, self.Y_val, self.X_test, self.Y_test, self.folds = self.prepare.get_datasets(
self.data, self.dataset_val)
self.target = self.prepare.target
# unlabeled data :
self.dataset_unlabeled = pd.read_csv(self.flags_parameters.path_data_unlabeled)
self.dataset_unlabeled = self.pre.transform(self.dataset_unlabeled)
self.X_unlabeled, self.Y_unlabeled, self.column_text, self.column_text_b = self.prepare.separate_X_Y(self.dataset_unlabeled)
if self.Y_unlabeled is None:
self.Y_unlabeled = pd.DataFrame({self.Y_train.columns[0]: [self.Y_train.iloc[0, 0] for i in range(len(self.X_unlabeled))]})
def preprocess_test_data(self, data_test):
""" Apply same transformation as in the function self.data_preprocessing for data_test
Args:
data_test (str, list, dict, dataframe)
Returns:
data_test (Dataframe)
doc_spacy_data_test (List) : documents from data_test preprocessed by spacy nlp
y_test (DataFrame) (Optional)
"""
if isinstance(data_test, str):
data_test = pd.DataFrame({self.flags_parameters.column_text: [data_test]})
elif isinstance(data_test, list):
data_test = pd.DataFrame({self.flags_parameters.column_text: data_test})
elif isinstance(data_test, dict):
data_test = pd.DataFrame(data_test)
if self.pre is not None:
data_test = self.pre.transform(data_test)
data_test, y_test, self.column_text, self.column_text_b = self.prepare.separate_X_Y(data_test)
else:
self.pre = Preprocessing_NLP(data_test, self.flags_parameters)
data_test = self.pre.transform(data_test)
self.prepare = Prepare(self.flags_parameters)
data_test, y_test, self.column_text, self.column_text_b = self.prepare.separate_X_Y(data_test)
self.target = self.prepare.target
if y_test is None:
return data_test
else:
return data_test, y_test
def prepare_model(self, x=None, y=None, x_val=None, y_val=None, type_model="classifier"):
""" Instantiate self.x_train, self.y_train, self.x_val, self.y_val and models
Args:
x (Dataframe) (Optional)
y (Dataframe) (Optional)
x_val (Dataframe) (Optional)
y_val (Dataframe) (Optional)
type_model (str) 'embedding', 'clustering' or 'classifier'
"""
# if x and y are None use self.X_train and self.Y_train else use x and y :
if x is not None:
self.x_train = x
else:
self.x_train = self.X_train
if y is not None:
self.y_train = y
else:
self.y_train = self.Y_train
if x_val is not None:
self.x_val = x_val
else:
if self.X_val is not None:
self.x_val = self.X_val
else:
self.x_val = None
if y_val is not None:
self.y_val = y_val
else:
if self.Y_val is not None:
self.y_val = self.Y_val
else:
self.y_val = None
if self.y_train is not None:
assert isinstance(self.y_train, pd.DataFrame), "y/self.y_train must be a DataFrame type"
def train_single_model(self, data_dir, train_file_name, dev_file_name, test_file_name, unlabeled_file_name,
text_x_column=0, text_x_column_b=None, text_y_column=1):
output_dir_train = os.path.join(self.outdir, "train_single_model")
if os.path.exists(output_dir_train) and os.listdir(output_dir_train) \
and self.flags_parameters.do_train and not self.flags_parameters.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty.".format(output_dir_train))
os.makedirs(output_dir_train)
if text_x_column_b is None:
TEXT_B_COLUMN = -1
else:
TEXT_B_COLUMN = text_x_column_b
processor = ClassProcessor(TRAIN_FILE_NAME=train_file_name, DEV_FILE_NAME=dev_file_name,
TEST_FILE_NAME=test_file_name, UNLABELED_FILE_NAME=unlabeled_file_name,
LABELS=self.flags_parameters.labels, TEXT_A_COLUMN=text_x_column,
TEXT_B_COLUMN=TEXT_B_COLUMN, LABEL_COLUMN=text_y_column)
self.flags_parameters.label_list = processor.get_labels()
with open(os.path.join(output_dir_train, "label_list.json"), "w") as outfile:
json.dump(self.flags_parameters.label_list, outfile)
args_dict = dataclasses.asdict(self.flags_parameters)
save_yaml(os.path.join(output_dir_train, "flags.yaml"), args_dict)
train_ex_per_label, test_ex_per_label = None, None
train_ex, test_ex = self.flags_parameters.train_examples, self.flags_parameters.test_examples
if self.flags_parameters.split_examples_evenly:
train_ex_per_label = eq_div(self.flags_parameters.train_examples, len(self.flags_parameters.label_list)) if self.flags_parameters.train_examples != -1 else -1
test_ex_per_label = eq_div(self.flags_parameters.test_examples, len(self.flags_parameters.label_list)) if self.flags_parameters.test_examples != -1 else -1
train_ex, test_ex = None, None
eval_set = TEST_SET if self.flags_parameters.eval_set == 'test' else DEV_SET
train_data = load_examples(
processor, data_dir, TRAIN_SET, num_examples=train_ex, num_examples_per_label=train_ex_per_label)
val_data = load_examples(
processor, data_dir, DEV_SET, num_examples=-1, num_examples_per_label=test_ex_per_label)
eval_data = load_examples(
processor, data_dir, eval_set, num_examples=test_ex, num_examples_per_label=test_ex_per_label)
unlabeled_data = load_examples(
processor, data_dir, UNLABELED_SET, num_examples=self.flags_parameters.unlabeled_examples)
# args.metrics = METRICS.get(args.task_name, DEFAULT_METRICS)
pet_model_cfg, pet_train_cfg, pet_eval_cfg = load_pet_configs(self.flags_parameters)
sc_model_cfg, sc_train_cfg, sc_eval_cfg = load_sequence_classifier_configs(self.flags_parameters)
ipet_cfg = load_ipet_config(self.flags_parameters)
if self.flags_parameters.method == 'pet':
pet.train_pet(pet_model_cfg, pet_train_cfg, pet_eval_cfg, sc_model_cfg, sc_train_cfg, sc_eval_cfg,
pattern_ids=self.flags_parameters.pattern_ids, output_dir=output_dir_train,
ensemble_repetitions=self.flags_parameters.pet_repetitions, final_repetitions=self.flags_parameters.sc_repetitions,
reduction=self.flags_parameters.reduction, train_data=train_data, val_data=val_data,
unlabeled_data=unlabeled_data,
eval_data=eval_data, do_train=self.flags_parameters.do_train, do_eval=self.flags_parameters.do_eval,
no_distillation=self.flags_parameters.no_distillation, seed=self.flags_parameters.seed)
elif self.flags_parameters.method == 'ipet':
pet.train_ipet(pet_model_cfg, pet_train_cfg, pet_eval_cfg, ipet_cfg, sc_model_cfg, sc_train_cfg,
sc_eval_cfg,
pattern_ids=self.flags_parameters.pattern_ids, output_dir=output_dir_train,
ensemble_repetitions=self.flags_parameters.pet_repetitions, final_repetitions=self.flags_parameters.sc_repetitions,
reduction=self.flags_parameters.reduction, train_data=train_data, val_data=val_data,
unlabeled_data=unlabeled_data,
eval_data=eval_data, do_train=self.flags_parameters.do_train, do_eval=self.flags_parameters.do_eval, seed=self.flags_parameters.seed)
elif self.flags_parameters.method == 'sequence_classifier':
pet.train_classifier(sc_model_cfg, sc_train_cfg, sc_eval_cfg, output_dir=output_dir_train,
repetitions=self.flags_parameters.sc_repetitions, train_data=train_data, val_data=val_data,
unlabeled_data=unlabeled_data,
eval_data=eval_data, do_train=self.flags_parameters.do_train,
do_eval=self.flags_parameters.do_eval, seed=self.flags_parameters.seed)
else:
raise ValueError(f"Training method '{self.flags_parameters.method}' not implemented")
output_dir_final_model = os.path.join(output_dir_train, 'final')
i = 0
while os.path.exists(os.path.join(self.outdir, 'final_'+str(i))):
i += 1
copytree(output_dir_final_model, os.path.join(self.outdir, 'final_'+str(i)))
rmtree(output_dir_train)
return os.path.join(self.outdir, 'final_'+str(i))
def predict_single_model(self, eval_set, output_dir_final_model, data_dir,
train_file_name, dev_file_name, test_file_name, unlabeled_file_name,
text_x_column=0, text_x_column_b=None, text_y_column=1):
args = add_fix_params(self.flags_parameters)
if output_dir_final_model is None:
output_dir_final_model = os.path.join(self.outdir, 'final')
try:
a = args.label_list
except:
args.label_list = args.labels
logger.info("Parameters: {}".format(args))
if not os.path.exists(self.outdir):
raise ValueError("Output directory ({}) doesn't exist".format(self.outdir))
f = open(os.path.join(output_dir_final_model, "label_map.json"), "r")
label_map = json.load(f)
if text_x_column_b is None:
TEXT_B_COLUMN = -1
else:
TEXT_B_COLUMN = text_x_column_b
processor = ClassProcessor(TRAIN_FILE_NAME=train_file_name, DEV_FILE_NAME=dev_file_name,
TEST_FILE_NAME=test_file_name, UNLABELED_FILE_NAME=unlabeled_file_name,
LABELS=args.labels, TEXT_A_COLUMN=text_x_column,
TEXT_B_COLUMN=TEXT_B_COLUMN, LABEL_COLUMN=text_y_column)
eval_data = load_examples(processor, data_dir, eval_set, num_examples=-1, num_examples_per_label=None)
sc_model_cfg, sc_train_cfg, sc_eval_cfg = load_sequence_classifier_configs(args)
logits_dict = pet.test(output_dir_final_model, eval_data, sc_eval_cfg, label_map, type_dataset=eval_set,
priming_data=None)
return logits_dict
def get_y_pred(self, logits_dict_test):
map_label = {i: k for i, k in enumerate(logits_dict_test.keys())}
y_pred = list(np.argmax(np.array([f for f in logits_dict_test.values()]).T, axis=1))
y_pred = [map_label[p] for p in y_pred]
return y_pred
def get_y_confidence(self, logits_dict_test):
y_confidence = list(np.max(np.array([f for f in logits_dict_test.values()]).T, axis=1))
return y_confidence
def calcul_metric_classification(self, y_true, y_pred, print_score=True):
""" Compute for multi-class variable (y_true, y_pred) accuracy, recall_macro, precision_macro and f1_macro
Args:
y_true (Dataframe or array)
y_pred (Dataframe or array)
print_score (Boolean)
Returns:
acc, f1, recall, precision (float)
"""
acc = np.round(accuracy_score(np.array(y_true).reshape(-1), np.array(y_pred).reshape(-1)), 4)
f1 = np.round(f1_score(np.array(y_true).reshape(-1), np.array(y_pred).reshape(-1), average='macro'), 4)
recall = np.round(recall_score(np.array(y_true).reshape(-1), np.array(y_pred).reshape(-1), average='macro'), 4)
precision = np.round(precision_score(np.array(y_true).reshape(-1), np.array(y_pred).reshape(-1), average='macro'), 4)
if print_score:
logger.info('\nScores :')
logger.info('accuracy = {}'.format(acc))
logger.info('precision {} = {}'.format('macro', precision))
logger.info('recall {} = {}'.format('macro', recall))
logger.info('f1 score {} = {}'.format('macro', f1))
logger.info('\n')
return acc, f1, recall, precision
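    # Worked example (illustrative): for y_true = ["a", "a", "b"] and
    # y_pred = ["a", "b", "b"], accuracy = 0.6667, macro precision = 0.75,
    # macro recall = 0.75 and macro F1 = 0.6667, so the method returns
    # (0.6667, 0.6667, 0.75, 0.75).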
def train(self, x=None, y=None, x_val=None, y_val=None):
""" Careful : Train for classification
Args:
x (Dataframe) (Optional)
y (Dataframe) (Optional)
x_val (Dataframe) (Optional)
y_val (Dataframe) (Optional)
"""
self.prepare_model(x=x, y=y, x_val=x_val, y_val=y_val, type_model="classifier")
# variable to store best Hyperparameters with specific "seed" and "scoring"
dict_models_parameters = {"seed": self.seed}
if self.x_val is None:
self.fold_id = np.ones((len(self.y_train),)) * -1
else:
self.fold_id = np.ones((len(self.y_val),)) * -1
self.oof_val = {}
data_dir = self.outdir
train_file_name = "train"
dev_file_name = "dev.csv"
test_file_name = "test.csv"
unlabeled_file_name = "unlabeled.csv"
first_fold = True
for num_fold, (train_index, val_index) in enumerate(self.folds):
logger.info("Fold {}:".format(num_fold))
if train_index == 'all':
# validation
x_train, x_val = self.x_train, self.x_val
y_train, y_val = self.y_train, self.y_val
else:
# cross-validation
x_train, x_val = self.x_train.iloc[train_index, :], self.x_train.iloc[val_index, :]
y_train, y_val = self.y_train.iloc[train_index, :], self.y_train.iloc[val_index, :]
text_x_column = self.column_text
text_x_column_b = self.column_text_b
if text_x_column_b is None:
text_y_column = 1
else:
text_y_column = 2
pd.concat([x_train, y_train], axis=1).to_csv(os.path.join(data_dir, train_file_name), index=False)
pd.concat([x_val, y_val], axis=1).to_csv(os.path.join(data_dir, dev_file_name), index=False)
pd.concat([self.X_unlabeled, self.Y_unlabeled], axis=1).to_csv(os.path.join(data_dir, unlabeled_file_name), index=False)
output_dir_final_model = self.train_single_model(data_dir, train_file_name, dev_file_name,
dev_file_name, unlabeled_file_name,
text_x_column=text_x_column, text_x_column_b=text_x_column_b,
text_y_column=text_y_column)
logits_dict_val = self.predict_single_model(eval_set="dev", output_dir_final_model=output_dir_final_model,
data_dir=data_dir,
train_file_name=train_file_name, dev_file_name=dev_file_name,
test_file_name=dev_file_name, unlabeled_file_name=unlabeled_file_name,
text_x_column=text_x_column, text_x_column_b=text_x_column_b,
text_y_column=text_y_column)
y_val_pred = self.get_y_pred(logits_dict_val)
for it, idx in enumerate(val_index):
self.oof_val[idx] = y_val_pred[it]
self.fold_id[val_index] = num_fold
sd = sorted(self.oof_val.items())
prediction_oof_val = []
for k, v in sd:
prediction_oof_val.append(v)
prediction_oof_val = np.array(prediction_oof_val)
if self.X_val is None:
# cross-validation
y_true_sample = self.y_train.values[np.where(self.fold_id >= 0)[0]].copy()
else:
# validation
y_true_sample = self.Y_val.copy()
acc, f1, recall, precision = self.calcul_metric_classification(y_true_sample, prediction_oof_val, True)
self.info_scores[self.flags_parameters.model_type]["accuracy_val"] = acc
self.info_scores[self.flags_parameters.model_type]["f1_macro_val"] = f1
self.info_scores[self.flags_parameters.model_type]["recall_macro_val"] = recall
self.info_scores[self.flags_parameters.model_type]["precision_macro_val"] = precision
def get_leaderboard(self, dataset='val', sort_by=None, ascending=False, info_models=None):
""" Metric scores for each model of self.models or info_models
if no optimization and validation : you need to give info_model (dictionary with Model class)
Args:
dataset (str) : 'val' or 'test', which prediction to use
sort_by (str) : metric column name to sort
ascending (Boolean)
info_models (dict) : dictionary with Model class
Return:
self.leaderboard (Dataframe)
"""
metrics = ['accuracy', 'recall_macro', 'precision_macro', 'f1_macro']
scores = {}
leaderboard = {"name": list(self.info_scores.keys())}
for metric in metrics:
scores[metric + '_' + dataset] = [self.info_scores[name_model][metric + '_' + dataset]
for name_model in self.info_scores.keys()]
leaderboard[metric + '_' + dataset] = np.round(scores[metric + '_' + dataset], 4)
if sort_by in ['recall', 'precision', 'f1']:
sort_by = sort_by + '_macro'
leaderboard = pd.DataFrame(leaderboard)
if sort_by:
leaderboard = leaderboard.sort_values(by=sort_by + '_' + dataset, ascending=ascending)
return leaderboard
def prediction(self, on_test_data=True, x=None, y=None, proba=True):
""" prediction on X_test if on_test_data else x
"""
if on_test_data and x is None: # predict on self.X_test
x = self.X_test
y = self.Y_test
data_dir = self.outdir
test_file_name = "test.csv"
text_x_column = self.column_text
text_x_column_b = self.column_text_b
if text_x_column_b is None:
text_y_column = 1
else:
text_y_column = 2
| pd.concat([x, y], axis=1) | pandas.concat |
import sys
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
# The GPU id to use, usually either "0" or "1";
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import itertools
import tensorflow as tf
from tensorflow import keras
from keras.callbacks import EarlyStopping
from keras.models import load_model
from sklearn.metrics import accuracy_score, average_precision_score, recall_score, precision_score, log_loss, auc, f1_score, roc_auc_score, classification_report, roc_curve, confusion_matrix, precision_recall_curve
from sklearn.utils import shuffle, class_weight
from sklearn.preprocessing import StandardScaler, MinMaxScaler, binarize
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
#Loading Data
path='***/Abdominal_Ultrasound_MLMD/'
name='Abdo_US'
train ='***/Abdominal_Ultrasound_MLMD/train_dat.pkl'#55%
val = '***/Abdominal_Ultrasound_MLMD/val_dat.pkl' #15%
test ='***/Abdominal_Ultrasound_MLMD/test_dat.pkl'#30%
#Label (Can change this to train the model using lab tests, combined label, or differential diagnoses label which is used in the study)
Label='PrimaryDx_Label' #From preprocessing file - this is the set of differential diagnoses used to act as the classification label
#Confusion Matrix Plot for Logistic Regression
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig(path + name + '_Logistic_Regression_CM')
plt.close()
#Confusion Matrix Plot for Random Forest
def plot_confusion_matrix_RF(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig(path + name + '_Random_Forest_CM')
plt.close()
#Confusion Matrix Plot for Neural Network
def plot_confusion_matrix_NN(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig(path + name + '_Neural_Network_CM')
plt.close()
#Function for upsampling of the minority class for training data only
def balance_classes(num, train_dat):
train_dat_0s = train_dat[train_dat[Label] == 0]
train_dat_1s = train_dat[train_dat[Label] == 1]
if num == 1:
# Bring up 1s
rep_1 = [train_dat_1s for x in range(train_dat_0s.shape[0] // train_dat_1s.shape[0])]
keep_1s = pd.concat(rep_1, axis=0)
train_dat = pd.concat([keep_1s, train_dat_0s], axis=0)
elif num == 0:
# Reduce 0s
keep_0s = train_dat_0s.sample(frac=train_dat_1s.shape[0]/train_dat_0s.shape[0])
train_dat = pd.concat([keep_0s,train_dat_1s],axis=0)
return train_dat
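# Worked example (illustrative): with 1000 rows labelled 0 and 100 rows labelled 1,
# balance_classes(1, train_dat) repeats the minority 1s ten times (1000 rows of each class),
# while balance_classes(0, train_dat) samples the 0s down to roughly 100 rows.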
def main():
print(tf.__version__)
#Loading input data - test, val, train data and dropping different labels (differential diagnosis, combined label, lab tests from the dataset)
test_dat = pd.read_pickle(test)
test_dat.drop(name, axis=1, inplace=True)
test_dat.drop('CM_Label', axis=1, inplace=True)
test_dat.drop('PrimaryDx', axis=1, inplace=True)
print (test_dat['PrimaryDx_Label'].value_counts())
val_dat = pd.read_pickle(val)
val_dat.drop(name, axis=1, inplace=True)
val_dat.drop('CM_Label', axis=1, inplace=True)
val_dat.drop('PrimaryDx', axis=1, inplace=True)
print (val_dat['PrimaryDx_Label'].value_counts())
train_dat = | pd.read_pickle(train) | pandas.read_pickle |
#!/usr/bin/env python
import sys
sys.path
sys.path.append('../')
from experiments.visualizations import newfig, savefig_and_close, \
plot_df_heatmap, render_mpl_table, \
export_df
import itertools
import os
from os import walk
import sys
from argparse import ArgumentParser
import json
import pandas as pd
import numpy as np
from scipy.stats import friedmanchisquare, wilcoxon, ranksums
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig_extension = 'svg'
def get_list_results_folders(folder, essentials=['description.txt'],
finished=None, return_unfinished=False):
list_results_folders = []
list_unfinished_folders = []
for root, subdirs, files in walk(folder, followlinks=True):
if set(essentials).issubset(set(files)):
if set(finished).issubset(set(files)):
list_results_folders.append(root)
elif return_unfinished:
list_unfinished_folders.append(root)
if return_unfinished:
return list_results_folders, list_unfinished_folders
return list_results_folders
def format_diary_df(df):
df[2] = pd.to_datetime(df[2])
df[3] = pd.to_timedelta(df[3], unit='s')
new_column_names = {0: 'entry_n', 1: 'subentry_n', 2: 'date', 3: 'time'}
for i in range(5, df.shape[1], 2):
new_column_names[i] = df.iloc[0, i-1]  # .ix is removed in modern pandas
df.rename(columns=new_column_names, inplace=True)
df.drop(list(range(4, df.shape[1], 2)), axis=1, inplace=True)
return df
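# Illustrative diary row (hypothetical values): columns 0-3 are entry/subentry/
# date/time, followed by alternating (key, value) pairs; the key columns of the
# first row become the new column names and are then dropped.
#   df = pd.DataFrame([[0, 0, '2019-01-01', 1.5, 'acc', 0.93, 'loss', 0.21]])
#   format_diary_df(df).columns
#   # -> entry_n, subentry_n, date, time, acc, loss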
def get_dataframe_from_csv(folder, filename, keep_time=False):
filename = os.path.join(folder, filename)
df = pd.read_csv(filename, header=None, quotechar='|',
infer_datetime_format=True)
df = format_diary_df(df)
if keep_time:
to_drop = ['entry_n', 'subentry_n']
else:
to_drop = ['entry_n', 'subentry_n', 'date', 'time']
df.drop(to_drop, axis=1, inplace=True)
return df
def extract_summary(folder):
dataset_df = get_dataframe_from_csv(folder, 'dataset.csv', keep_time=True)
results_df = get_dataframe_from_csv(folder, 'training.csv',
keep_time=False)
best_epoch = results_df.groupby(by='epoch')['val_y_loss'].mean().idxmin()
# FIXME the best epoch could be computed for all the summaries later on
# However, it seems easier at this point
results_df['best_epoch'] = best_epoch
model_df = get_dataframe_from_csv(folder, 'model.csv', keep_time=False)
dataset_df['folder'] = folder
results_df['folder'] = folder
model_df['folder'] = folder
summary = pd.merge(results_df, dataset_df)
summary = pd.merge(summary, model_df)
# TODO add test results
try:
results_test_df = get_dataframe_from_csv(folder, 'test.csv',
keep_time=False)
results_test_df.rename(columns={key: 'test_' + key for key in
results_test_df.columns},
inplace=True)
results_test_df['folder'] = folder
cm_string = results_test_df['test_cm'][0]
cm_string = ''.join(i for i in cm_string if i == ' ' or i.isdigit())
cm = np.fromstring(cm_string, dtype=int, sep=' ')
n_classes = int(np.sqrt(len(cm)))
print('Samples with y = {}, test acc = {}, n_classes = {}'.format(
dataset_df['n_samples_with_y'][0],
results_test_df['test_acc'][0],
n_classes))
summary = pd.merge(summary, results_test_df)
except IOError as e:
# TODO solve the possible IOError
# from IPython import embed; embed()
pass
return summary
def extract_unfinished_summary(folder):
dataset_df = get_dataframe_from_csv(folder, 'dataset.csv', keep_time=True)
dataset_df['folder'] = folder
return dataset_df
def export_datasets_info(df, path='', stylesheets=['style.css']):
columns = ['dataset', 'n_samples_without_y', 'n_samples_with_y', 'n_features',
'n_classes']
sort_by = ['dataset']
index = columns[0]
df_table = df[columns].drop_duplicates(subset=columns, keep='first'
).sort_values(sort_by).set_index(index)
# Export to LaTeX
df_table.to_latex(os.path.join(path, "datasets.tex"))
df_table.set_index([df_table.index, 'n_samples_with_y'], inplace=True)
# Add mean performance of best model
best_model = df.groupby('architecture')['val_y_acc'].mean().idxmax()
mean_loss_best_model = df[df['architecture'] == best_model][[
'dataset', 'val_y_acc', 'n_samples_with_y']].groupby(
by=['dataset', 'n_samples_with_y']).mean().round(decimals=2)
mean_loss_best_model = mean_loss_best_model.rename(
columns={'val_y_acc': 'mean(val_y_acc)'})
df_table = pd.concat([df_table, mean_loss_best_model], axis=1)
# FIXME study if this change needs to be done in export_df
df_table_one_index = df_table.reset_index()
# Export to svg
export_df(df_table_one_index, 'datasets', path=path, extension='svg')
return df_table
def friedman_test(df, index, column):
indices = np.sort(df[index].unique())
results = {}
first = True
for ind in indices:
results[ind] = df[df[index] == ind][column].values
if first:
size = results[ind].shape[0]
first = False
elif size != results[ind].shape[0]:
print("Friedman test cannot be done with different sample sizes")
return
# FIXME be sure that the order is correct
statistic, pvalue = friedmanchisquare(*results.values())
return statistic, pvalue
def wilcoxon_rank_sum_test(df, index, column, signed=False,
twosided=True):
indices = np.sort(df[index].unique())
results = {}
for i, index1 in enumerate(indices):
results[index1] = df[df[index] == index1][column]
# Ensures that all the results are aligned by simulation number (column
# sim), dataset (column name) and mixing_matrix_M
if i == 0:
experiments = df[df[index] == index1][['sim', 'mixing_matrix_M',
'dataset']].values
else:
np.testing.assert_equal(experiments,
df[df[index] == index1][['sim',
'mixing_matrix_M',
'dataset']].values)
stat = []
for (index1, index2) in itertools.combinations(indices, 2):
if index1 != index2:
if signed:
statistic, pvalue = wilcoxon(results[index1].values,
results[index2].values)
else:
statistic, pvalue = ranksums(results[index1].values,
results[index2].values)
if not twosided:
pvalue /= 2
stat.append(pd.DataFrame([[index1, index2, statistic, pvalue]],
columns=['index1', 'index2', 'statistic',
'p-value']))
dfstat = pd.concat(stat, axis=0, ignore_index=True)
return dfstat
def main(results_path='results', summary_path='', filter_rows={},
filter_performance=1.0, verbose=1, gui=False, avoid_big_files=True,
only_best_epoch=True):
print('\n#########################################################')
print('##### Making summary of folder {}'.format(results_path))
print('#')
results_folders, unfin_folders = get_list_results_folders(
results_path, essentials=['description.txt', 'dataset.csv',
'model.csv'],
finished=['validation.csv', 'training.csv'],
return_unfinished=True)
if summary_path == '':
summary_path = os.path.join(results_path, 'summary')
# Creates summary path if it does not exist
if not os.path.exists(summary_path):
os.mkdir(summary_path)
u_summaries = []
for uf in unfin_folders:
u_summaries.append(extract_unfinished_summary(uf))
if len(u_summaries) > 0:
print("Experiments that did not finish: {}".format(len(u_summaries)))
dfs_unf = pd.concat(u_summaries, axis=0, ignore_index=True)
if verbose > 1:
print(dfs_unf)
f_summaries = []
for rf in results_folders:
f_summaries.append(extract_unfinished_summary(rf))
if len(f_summaries) == 0:
print("There are no finished experiments")
sys.exit(0)
else:
print("Experiments finished: {}".format(len(f_summaries)))
dfs_fin = | pd.concat(f_summaries, axis=0, ignore_index=True) | pandas.concat |
from copy import deepcopy
import os
import re
import math
from sktime.forecasting.model_selection import (
ExpandingWindowSplitter,
SlidingWindowSplitter,
)
from pycaret.internal.pycaret_experiment.utils import highlight_setup, MLUsecase
from pycaret.internal.pycaret_experiment.supervised_experiment import (
_SupervisedExperiment,
)
from pycaret.internal.pipeline import (
estimator_pipeline,
get_pipeline_fit_kwargs,
)
from pycaret.internal.utils import (
color_df,
SeasonalPeriod,
TSModelTypes,
get_function_params,
)
import pycaret.internal.patches.sklearn
import pycaret.internal.patches.yellowbrick
from pycaret.internal.logging import get_logger
from pycaret.internal.Display import Display
from pycaret.internal.distributions import *
from pycaret.internal.validation import *
from pycaret.internal.tunable import TunableMixin
import pycaret.containers.metrics.time_series
import pycaret.containers.models.time_series
import pycaret.internal.preprocess
import pycaret.internal.persistence
import pandas as pd # type: ignore
from pandas.io.formats.style import Styler
import numpy as np # type: ignore
import datetime
import time
import gc
from sklearn.base import clone # type: ignore
from typing import List, Tuple, Any, Union, Optional, Dict, Generator
import warnings
from IPython.utils import io
import traceback
import plotly.express as px # type: ignore
import plotly.graph_objects as go # type: ignore
import logging
from sklearn.model_selection._validation import _aggregate_score_dicts  # type: ignore
from sklearn.model_selection import check_cv, ParameterGrid, ParameterSampler # type: ignore
from sklearn.model_selection._search import _check_param_grid # type: ignore
from sklearn.metrics._scorer import get_scorer, _PredictScorer # type: ignore
from collections import defaultdict
from functools import partial
from scipy.stats import rankdata # type: ignore
from joblib import Parallel, delayed # type: ignore
from sktime.forecasting.base import ForecastingHorizon
from sktime.utils.validation.forecasting import check_y_X  # type: ignore
from pycaret.internal.tests.time_series import test_
from pycaret.internal.plots.time_series import plot_
warnings.filterwarnings("ignore")
LOGGER = get_logger()
# def _get_cv_n_folds(y, cv) -> int:
# """
# Get the number of folds for time series
# cv must be of type SlidingWindowSplitter or ExpandingWindowSplitter
# TODO: Fix this inside sktime and replace this with sktime method [1]
# Ref:
# [1] https://github.com/alan-turing-institute/sktime/issues/632
# """
# n_folds = int((len(y) - cv.initial_window) / cv.step_length)
# return n_folds
def get_folds(cv, y) -> Generator[Tuple[pd.Series, pd.Series], None, None]:
"""
Returns the train and test indices for the time series data
"""
# https://github.com/alan-turing-institute/sktime/blob/main/examples/window_splitters.ipynb
for train_indices, test_indices in cv.split(y):
# print(f"Train Indices: {train_indices}, Test Indices: {test_indices}")
yield train_indices, test_indices
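# Minimal sketch (hypothetical series; not part of the original module):
#   y = pd.Series(np.arange(30))
#   cv = SlidingWindowSplitter(fh=np.arange(1, 4), window_length=12, step_length=3)
#   for train_idx, test_idx in get_folds(cv, y):
#       print(len(train_idx), len(test_idx))   # 12 train points, 3 test points per fold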
def cross_validate_ts(
forecaster,
y: pd.Series,
X: Optional[Union[pd.Series, pd.DataFrame]],
cv,
scoring: Dict[str, Union[str, _PredictScorer]],
fit_params,
n_jobs,
return_train_score,
error_score=0,
verbose: int = 0,
**additional_scorer_kwargs,
) -> Dict[str, np.array]:
"""Performs Cross Validation on time series data
Parallelization is based on `sklearn` cross_validate function [1]
Ref:
[1] https://github.com/scikit-learn/scikit-learn/blob/0.24.1/sklearn/model_selection/_validation.py#L246
Parameters
----------
forecaster : sktime-compatible forecaster
Time Series Forecaster that is compatible with sktime
y : pd.Series
The variable of interest for forecasting
X : Optional[Union[pd.Series, pd.DataFrame]]
Exogenous Variables
cv : sktime-compatible cross-validation splitter
e.g. ExpandingWindowSplitter or SlidingWindowSplitter
scoring : Dict[str, Union[str, _PredictScorer]]
Scoring Dictionary. Values can be valid strings that can be converted to
callable metrics or the callable metrics directly
fit_params : dict
Fit parameters to be used when training
n_jobs : int
Number of cores to use to parallelize. Refer to sklearn for details
return_train_score : bool
Should the training scores be returned. Unused for now.
error_score : int, optional
Unused for now, by default 0
verbose : int
Sets the verbosity level. Unused for now
additional_scorer_kwargs: Dict[str, Any]
Additional scorer kwargs such as {`sp`:12} required by metrics like MASE
Returns
-------
Tuple[Dict[str, np.array], Tuple]
Dictionary of per-fold test scores for each metric, and the cutoff
point of each fold.
Raises
------
Error
If fit and score raises any exceptions
"""
try:
# # For Debug
# n_jobs = 1
scoring = _get_metrics_dict_ts(scoring)
parallel = Parallel(n_jobs=n_jobs)
out = parallel(
delayed(_fit_and_score)(
forecaster=clone(forecaster),
y=y,
X=X,
scoring=scoring,
train=train,
test=test,
parameters=None,
fit_params=fit_params,
return_train_score=return_train_score,
error_score=error_score,
**additional_scorer_kwargs,
)
for train, test in get_folds(cv, y)
)
# raise key exceptions
except Exception:
raise
# Similar to parts of _format_results in BaseGridSearch
(test_scores_dict, fit_time, score_time, cutoffs) = zip(*out)
test_scores = _aggregate_score_dicts(test_scores_dict)
return test_scores, cutoffs
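# Minimal usage sketch (illustrative only; NaiveForecaster and the metric name
# are assumptions, not part of this module):
#   scores, cutoffs = cross_validate_ts(
#       forecaster=NaiveForecaster(), y=y, X=None, cv=cv,
#       scoring={"mae": "neg_mean_absolute_error"}, fit_params={},
#       n_jobs=1, return_train_score=False)
#   # scores["mae"] holds one value per fold, cutoffs the fold cutoff points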
def _get_metrics_dict_ts(
metrics_dict: Dict[str, Union[str, _PredictScorer]]
) -> Dict[str, _PredictScorer]:
"""Returns a metrics dictionary in which all values are callables
of type _PredictScorer
Parameters
----------
metrics_dict : A metrics dictionary in which some values can be strings.
If the value is a string, the corresponding callable metric is returned
e.g. Dictionary Value of 'neg_mean_absolute_error' will return
make_scorer(mean_absolute_error, greater_is_better=False)
"""
return_metrics_dict = {}
for k, v in metrics_dict.items():
if isinstance(v, str):
return_metrics_dict[k] = get_scorer(v)
else:
return_metrics_dict[k] = v
return return_metrics_dict
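# Example (illustrative): string entries are resolved through sklearn's
# get_scorer, callables are passed through unchanged.
#   _get_metrics_dict_ts({"mae": "neg_mean_absolute_error"})
#   # -> {"mae": make_scorer(mean_absolute_error, greater_is_better=False)}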
def _fit_and_score(
forecaster,
y: pd.Series,
X: Optional[Union[pd.Series, pd.DataFrame]],
scoring: Dict[str, Union[str, _PredictScorer]],
train,
test,
parameters,
fit_params,
return_train_score,
error_score=0,
**additional_scorer_kwargs,
):
"""Fits the forecaster on a single train split and scores on the test split
Similar to _fit_and_score from `sklearn` [1] (and to some extent `sktime` [2]).
Difference is that [1] operates on a single fold only, whereas [2] operates on all cv folds.
Ref:
[1] https://github.com/scikit-learn/scikit-learn/blob/0.24.1/sklearn/model_selection/_validation.py#L449
[2] https://github.com/alan-turing-institute/sktime/blob/v0.5.3/sktime/forecasting/model_selection/_tune.py#L95
Parameters
----------
forecaster : sktime-compatible forecaster
Time Series Forecaster that is compatible with sktime
y : pd.Series
The variable of interest for forecasting
X : Optional[Union[pd.Series, pd.DataFrame]]
Exogenous Variables
scoring : Dict[str, Union[str, _PredictScorer]]
Scoring Dictionary. Values can be valid strings that can be converted to
callable metrics or the callable metrics directly
train : array-like
Indices of training samples.
test : array-like
Indices of test samples.
parameters : dict or None
Parameters to set on the forecaster before fitting
fit_params : dict
Fit parameters to be used when training
return_train_score : bool
Should the training scores be returned. Unused for now.
error_score : int, optional
Unused for now, by default 0
**additional_scorer_kwargs: Dict[str, Any]
Additional scorer kwargs such as {`sp`:12} required by metrics like MASE
Raises
------
ValueError
When test indices do not match predicted indices. This is only for
for internal checks and should not be raised when used by external users
"""
if parameters is not None:
forecaster.set_params(**parameters)
y_train, y_test = y[train], y[test]
X_train = None if X is None else X[train]
X_test = None if X is None else X[test]
#### Fit the forecaster ----
start = time.time()
try:
forecaster.fit(y_train, X_train, **fit_params)
except Exception as error:
logging.error(f"Fit failed on {forecaster}")
logging.error(error)
if error_score == "raise":
raise
fit_time = time.time() - start
#### Determine Cutoff ----
# NOTE: Cutoff is available irrespective of whether fit passed or failed
cutoff = forecaster.cutoff
#### Score the model ----
lower = | pd.Series([]) | pandas.Series |
__author__ = "<NAME>"
import os
import re
import gzip
import logging
import pandas
import csv
from .Exceptions import ReportableException
def folder_contents(folder, pattern=None):
regexp = re.compile(pattern) if pattern else None
p = os.listdir(folder)
if regexp: p = [x for x in p if regexp.search(x)]
p = [os.path.join(folder,x) for x in p]
return p
def ensure_requisite_folders(path):
folder = os.path.split(path)[0]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
def maybe_create_folder(path):
if not os.path.exists(path):
os.makedirs(path)
########################################################################################################################
def file_logic(folder, pattern):
r = re.compile(pattern)
f = sorted([x for x in os.listdir(folder) if r.search(x)])
p_ = [r.search(x).group(1) for x in f]
p = [os.path.join(folder,x) for x in f]
return pandas.DataFrame({"name":p_, "path":p, "file":f})
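# Illustrative call (hypothetical file names): the pattern must contain one
# capture group, which becomes the "name" column.
#   file_logic("data", r"(chr\d+)\.dosage\.txt\.gz")
#   # -> DataFrame with columns name ("chr1", ...), path and file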
def file_logic_2(folder, pattern, sub_fields, filter=None):
r = re.compile(pattern)
f = os.listdir(folder)
if filter is not None:
filter = re.compile(filter)
f = [x for x in f if filter.search(x)]
f = sorted([x for x in f if r.search(x)])
r, subfield_names, subfield_positions = name_parse_prepare(pattern, sub_fields)
values=[]
for x in f:
values.append((x, os.path.join(folder, x))+name_parse(x, r, subfield_positions))
columns = ["file", "path"] + subfield_names
values = pandas.DataFrame(values, columns=columns)
return values
########################################################################################################################
def name_parse_prepare(name_subfield_regexp, name_subfield):
if name_subfield and name_subfield_regexp:
r = re.compile(name_subfield_regexp)
subfield_names = [x[0] for x in name_subfield]
subfield_positions = [int(x[1]) for x in name_subfield]
else:
r = None
subfield_names = None
subfield_positions = None
return r, subfield_names, subfield_positions
def name_parse(file, subfield_regexp, subfield_positions):
if subfield_positions:
values = []
s_ = subfield_regexp.search(file)
for position in subfield_positions:
values.append(s_.group(position))
values = tuple(values)
else:
values = None
return values
def name_parse_argumentize(subfield_names, subfield_positions, values):
return {subfield_names[i-1]:values[i-1] for i in subfield_positions}
########################################################################################################################
class PercentReporter(object):
def __init__(self, level, total, increment=10, pattern="%i %% complete"):
self.level = level
self.total = total
self.increment = increment
self.last_reported = 0
self.pattern = pattern
def update(self, i, text=None, force=False):
percent = int(i*100.0/self.total)
if force or percent >= self.last_reported + self.increment:
self.last_reported = percent
if not text:
text = self.pattern
logging.log(self.level, text, percent)
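# Illustrative usage (not part of the original module): log progress at INFO
# level every 10%; `items` is a hypothetical iterable.
#   reporter = PercentReporter(logging.INFO, total=len(items))
#   for i, item in enumerate(items):
#       reporter.update(i)
#   reporter.update(len(items), force=True)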
ERROR_REGEXP = re.compile('[^0-9a-zA-Z]+')
########################################################################################################################
def load_list(path):
k = pandas.read_table(path, header=None)
return k.iloc[:,0]
def to_dataframe(data, columns, fill_na=None, to_numeric=None):
if len(data):
data = pandas.DataFrame(data, columns=columns)
data = data[columns]
else:
data = | pandas.DataFrame(columns=columns) | pandas.DataFrame |
"""
Functions for categoricals
"""
from itertools import chain, product
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from pandas.core.algorithms import value_counts
from .utils import last2
__all__ = [
'cat_anon',
'cat_collapse',
'cat_concat',
'cat_drop',
'cat_expand',
'cat_explicit_na',
'cat_infreq',
'cat_inorder',
'cat_inseq',
'cat_lump',
'cat_lump_lowfreq',
'cat_lump_min',
'cat_lump_n',
'cat_lump_prop',
'cat_move',
'cat_other',
'cat_recode',
'cat_relabel',
'cat_relevel',
'cat_rename',
'cat_reorder',
'cat_reorder2',
'cat_rev',
'cat_shift',
'cat_shuffle',
'cat_unify',
'cat_zip',
]
def cat_infreq(c, ordered=None):
"""
Reorder categorical by frequency of the values
Parameters
----------
c : list-like
Values that will make up the categorical.
ordered : bool
If ``True``, the categorical is ordered.
Returns
-------
out : categorical
Values
Examples
--------
>>> x = ['d', 'a', 'b', 'b', 'c', 'c', 'c']
>>> cat_infreq(x)
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c', 'b', 'd', 'a']
>>> cat_infreq(x, ordered=True)
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c' < 'b' < 'd' < 'a']
When two or more values occur the same number of times, if the
categorical is ordered, the order is preserved. If it is not
ordered, the order depends on that of the values. Above 'd'
comes before 'a', and below 'a' comes before 'd'.
>>> c = pd.Categorical(
... x, categories=['a', 'c', 'b', 'd']
... )
>>> cat_infreq(c)
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c', 'b', 'a', 'd']
>>> cat_infreq(c.set_ordered(True))
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c' < 'b' < 'a' < 'd']
"""
kwargs = {} if ordered is None else {'ordered': ordered}
counts = value_counts(c)
if pdtypes.is_categorical_dtype(c):
original_cat_order = c.categories
else:
original_cat_order = pd.unique(c)
counts = counts.reindex(index=original_cat_order)
cats = (_stable_series_sort(counts, ascending=False)
.index
.to_list())
return pd.Categorical(c, categories=cats, **kwargs)
def cat_inorder(c, ordered=None):
"""
Reorder categorical by appearance
Parameters
----------
c : list-like
Values that will make up the categorical.
ordered : bool
If ``True``, the categorical is ordered.
Returns
-------
out : categorical
Values
Examples
--------
>>> import numpy as np
>>> x = [4, 1, 3, 4, 4, 7, 3]
>>> cat_inorder(x)
[4, 1, 3, 4, 4, 7, 3]
Categories (4, int64): [4, 1, 3, 7]
>>> arr = np.array(x)
>>> cat_inorder(arr)
[4, 1, 3, 4, 4, 7, 3]
Categories (4, int64): [4, 1, 3, 7]
>>> c = ['b', 'f', 'c', None, 'c', 'a', 'b', 'e']
>>> cat_inorder(c)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
>>> s = pd.Series(c)
>>> cat_inorder(s)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
>>> cat = pd.Categorical(c)
>>> cat_inorder(cat)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
>>> cat_inorder(cat, ordered=True)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b' < 'f' < 'c' < 'a' < 'e']
By default, ordered categories remain ordered.
>>> ocat = pd.Categorical(cat, ordered=True)
>>> ocat
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['a' < 'b' < 'c' < 'e' < 'f']
>>> cat_inorder(ocat)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b' < 'f' < 'c' < 'a' < 'e']
>>> cat_inorder(ocat, ordered=False)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
Notes
-----
``NaN`` or ``None`` are ignored when creating the categories.
"""
kwargs = {} if ordered is None else {'ordered': ordered}
if isinstance(c, (pd.Series, pd.Categorical)):
cats = c[~pd.isnull(c)].unique()
if hasattr(cats, 'to_list'):
cats = cats.to_list()
elif hasattr(c, 'dtype'):
cats = pd.unique(c[~pd.isnull(c)])
else:
cats = pd.unique([
x for x, keep in zip(c, ~pd.isnull(c))
if keep
])
return pd.Categorical(c, categories=cats, **kwargs)
def cat_inseq(c, ordered=None):
"""
Reorder categorical by numerical order
Parameters
----------
c : list-like
Values that will make up the categorical.
ordered : bool
If ``True``, the categorical is ordered.
Returns
-------
out : categorical
Values
Examples
--------
>>> x = pd.Categorical([5, 1, 3, 2, 4])
>>> cat_inseq(x)
[5, 1, 3, 2, 4]
Categories (5, int64): [1, 2, 3, 4, 5]
>>> x = pd.Categorical([5, 1, '3', 2, 4])
>>> cat_inseq(x)
[5, 1, 3, 2, 4]
Categories (5, int64): [1, 2, 3, 4, 5]
Values that cannot be coerced to numerical turn into ``NaN``,
and categories cannot be ``NaN``.
>>> x = pd.Categorical([5, 1, 'three', 2, 4])
>>> cat_inseq(x)
[5, 1, NaN, 2, 4]
Categories (4, int64): [1, 2, 4, 5]
Coerces values to numerical
>>> x = [5, 1, '3', 2, 4]
>>> cat_inseq(x, ordered=True)
[5, 1, 3, 2, 4]
Categories (5, int64): [1 < 2 < 3 < 4 < 5]
>>> x = [5, 1, '3', 2, '4.5']
>>> cat_inseq(x)
[5.0, 1.0, 3.0, 2.0, 4.5]
Categories (5, float64): [1.0, 2.0, 3.0, 4.5, 5.0]
At least one of the values must be coercible to a number
>>> x = ['five', 'one', 'three', 'two', 'four']
>>> cat_inseq(x)
Traceback (most recent call last):
...
ValueError: Atleast one existing category must be a number.
>>> x = ['five', 'one', '3', 'two', 'four']
>>> cat_inseq(x)
[NaN, NaN, 3, NaN, NaN]
Categories (1, int64): [3]
"""
c = as_categorical(c)
# one value at a time to avoid turning integers into floats
# when some values create nans
numerical_cats = []
for x in c.categories:
_x = pd.to_numeric(x, 'coerce')
if not pd.isnull(_x):
numerical_cats.append(_x)
if len(numerical_cats) == 0 and len(c) > 0:
raise ValueError(
"Atleast one existing category must be a number."
)
# Change the original categories to numerical ones, making sure
# to rename the existing ones i.e '3' becomes 3. Only after that,
# change to order.
c = (c.set_categories(numerical_cats, rename=True)
.reorder_categories(sorted(numerical_cats)))
if ordered is not None:
c.set_ordered(ordered, inplace=True)
return c
def cat_reorder(c, x, fun=np.median, ascending=True):
"""
Reorder categorical by sorting along another variable
It is the order of the categories that changes. Values in x
are grouped by categories and summarised to determine the
new order.
Parameters
----------
c : list-like
Values that will make up the categorical.
x : list-like
Values by which ``c`` will be ordered.
fun : callable
Summarising function to ``x`` for each category in ``c``.
Default is the *median*.
ascending : bool
If ``True``, the ``c`` is ordered in ascending order of ``x``.
Examples
--------
>>> c = list('abbccc')
>>> x = [11, 2, 2, 3, 33, 3]
>>> cat_reorder(c, x)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'c', 'a']
>>> cat_reorder(c, x, fun=max)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'a', 'c']
>>> cat_reorder(c, x, fun=max, ascending=False)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['c', 'a', 'b']
>>> c_ordered = pd.Categorical(c, ordered=True)
>>> cat_reorder(c_ordered, x)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b' < 'c' < 'a']
>>> cat_reorder(c + ['d'], x)
Traceback (most recent call last):
...
ValueError: Lengths are not equal. len(c) is 7 and len(x) is 6.
"""
if len(c) != len(x):
raise ValueError(
"Lengths are not equal. len(c) is {} and len(x) is {}.".format(
len(c), len(x)
)
)
summary = (pd.Series(x)
.groupby(c)
.apply(fun)
.sort_values(ascending=ascending)
)
cats = summary.index.to_list()
return pd.Categorical(c, categories=cats)
def cat_reorder2(c, x, y, *args, fun=last2, ascending=False, **kwargs):
"""
Reorder categorical by sorting along another variable
It is the order of the categories that changes. Values in x
are grouped by categories and summarised to determine the
new order.
Parameters
----------
c : list-like
Values that will make up the categorical.
x : list-like
Values by which ``c`` will be ordered.
y : list-like
Values by which ``c`` will be ordered.
*args : tuple
Position arguments passed to function fun.
fun : callable
Summarising function to ``x`` for each category in ``c``.
Default is the *median*.
ascending : bool
If ``True``, the ``c`` is ordered in ascending order of ``x``.
**kwargs : dict
Keyword arguments passed to ``fun``.
Examples
--------
Order stocks by the price in the latest year. This type of ordering
can be used to order line plots so that the ends match the order of
the legend.
>>> stocks = list('AAABBBCCC')
>>> year = [1980, 1990, 2000] * 3
>>> price = [12.34, 12.90, 13.55, 10.92, 14.73, 11.08, 9.02, 12.44, 15.65]
>>> cat_reorder2(stocks, year, price)
['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
Categories (3, object): ['C', 'A', 'B']
"""
if len(c) != len(x) or len(x) != len(y):
raise ValueError(
"Lengths are not equal. len(c) is {}, len(x) is {} and "
"len(y) is {}.".format(len(c), len(x), len(y))
)
# Wrap two argument function fun with a function that
# takes a dataframe, put x and y into a dataframe, then
# use dataframe.groupby
def _fun(cat_df):
return fun(cat_df['x'], cat_df['y'], *args, **kwargs)
summary = (pd.DataFrame({'x': x, 'y': y})
.groupby(c)
.apply(_fun)
.sort_values(ascending=ascending)
)
cats = summary.index.to_list()
return pd.Categorical(c, categories=cats)
def cat_move(c, *args, to=0):
"""
Reorder categories explicitly
Parameters
----------
c : list-like
Values that will make up the categorical.
*args : tuple
Categories to reorder. Any categories not mentioned
will be left in existing order.
to : int or inf
Position where to place the categories. ``inf``, puts
them at the end (highest value).
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c', 'd', 'e']
>>> cat_move(c, 'e', 'b')
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['e', 'b', 'a', 'c', 'd']
>>> cat_move(c, 'c', to=np.inf)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['a', 'b', 'd', 'e', 'c']
>>> cat_move(pd.Categorical(c, ordered=True), 'a', 'c', 'e', to=1)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['b' < 'a' < 'c' < 'e' < 'd']
"""
c = as_categorical(c)
if np.isinf(to):
to = len(c.categories)
args = list(args)
unmoved_cats = c.categories.drop(args).to_list()
cats = unmoved_cats[0:to] + args + unmoved_cats[to:]
c.reorder_categories(cats, inplace=True)
return c
def cat_rev(c):
"""
Reverse order of categories
Parameters
----------
c : list-like
Values that will make up the categorical.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c']
>>> cat_rev(c)
['a', 'b', 'c']
Categories (3, object): ['c', 'b', 'a']
>>> cat_rev(pd.Categorical(c))
['a', 'b', 'c']
Categories (3, object): ['c', 'b', 'a']
"""
c = as_categorical(c)
c.reorder_categories(c.categories[::-1], inplace=True)
return c
def cat_shift(c, n=1):
"""
Shift and wrap-around categories to the left or right
Parameters
----------
c : list-like
Values that will make up the categorical.
n : int
Number of times to shift. If positive, shift to
the left, if negative shift to the right.
Default is 1.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c', 'd', 'e']
>>> cat_shift(c)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['b', 'c', 'd', 'e', 'a']
>>> cat_shift(c, 2)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['c', 'd', 'e', 'a', 'b']
>>> cat_shift(c, -2)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['d', 'e', 'a', 'b', 'c']
>>> cat_shift(pd.Categorical(c, ordered=True), -3)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['c' < 'd' < 'e' < 'a' < 'b']
"""
c = as_categorical(c)
cats = c.categories.to_list()
cats_extended = cats + cats
m = len(cats)
n = n % m
cats = cats_extended[n:m] + cats_extended[:n]
c.reorder_categories(cats, inplace=True)
return c
def cat_shuffle(c, random_state=None):
"""
Reverse order of categories
Parameters
----------
c : list-like
Values that will make up the categorical.
random_state : int or ~numpy.random.RandomState, optional
Seed or Random number generator to use. If ``None``, then
numpy global generator :class:`numpy.random` is used.
Returns
-------
out : categorical
Values
Examples
--------
>>> np.random.seed(123)
>>> c = ['a', 'b', 'c', 'd', 'e']
>>> cat_shuffle(c)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['b', 'd', 'e', 'a', 'c']
>>> cat_shuffle(pd.Categorical(c, ordered=True), 321)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['d' < 'b' < 'a' < 'c' < 'e']
"""
c = as_categorical(c)
if random_state is None:
random_state = np.random
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
elif not isinstance(random_state, np.random.RandomState):
raise TypeError(
"Unknown type `{}` of random_state".format(type(random_state))
)
cats = c.categories.to_list()
random_state.shuffle(cats)
c.reorder_categories(cats, inplace=True)
return c
# Change the value of categories
def cat_anon(c, prefix='', random_state=None):
"""
Anonymise categories
Neither the value nor the order of the categories is preserved.
Parameters
----------
c : list-like
Values that will make up the categorical.
random_state : int or ~numpy.random.RandomState, optional
Seed or Random number generator to use. If ``None``, then
numpy global generator :class:`numpy.random` is used.
Returns
-------
out : categorical
Values
Examples
--------
>>> np.random.seed(123)
>>> c = ['a', 'b', 'b', 'c', 'c', 'c']
>>> cat_anon(c)
['0', '1', '1', '2', '2', '2']
Categories (3, object): ['1', '0', '2']
>>> cat_anon(c, 'c-', 321)
['c-1', 'c-2', 'c-2', 'c-0', 'c-0', 'c-0']
Categories (3, object): ['c-0', 'c-2', 'c-1']
>>> cat_anon(pd.Categorical(c, ordered=True), 'c-', 321)
['c-1', 'c-2', 'c-2', 'c-0', 'c-0', 'c-0']
Categories (3, object): ['c-0' < 'c-2' < 'c-1']
"""
c = as_categorical(c)
if random_state is None:
random_state = np.random
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
elif not isinstance(random_state, np.random.RandomState):
raise TypeError(
"Unknown type `{}` of random_state".format(type(random_state))
)
# Shuffle two times,
# 1. to prevent predicable sequence to category mapping
# 2. to prevent reversing of the new categories to the old ones
fmt = '{}{}'.format
cats = [fmt(prefix, i) for i in range(len(c.categories))]
random_state.shuffle(cats)
c.rename_categories(cats, inplace=True)
cats = c.categories.to_list()
random_state.shuffle(cats)
c.reorder_categories(cats, inplace=True)
return c
def cat_collapse(c, mapping, group_other=False):
"""
Collapse categories into manually defined groups
Parameters
----------
c : list-like
Values that will make up the categorical.
mapping : dict
New categories and the old categories contained in them.
group_other : False
If ``True``, a category is created to contain all other
categories that have not been explicitly collapsed.
The name of the other categories is ``other``, it may be
postfixed by the first available integer starting from
2 if there is a category with a similar name.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c', 'd', 'e', 'f']
>>> mapping = {'first_2': ['a', 'b'], 'second_2': ['c', 'd']}
>>> cat_collapse(c, mapping)
['first_2', 'first_2', 'second_2', 'second_2', 'e', 'f']
Categories (4, object): ['first_2', 'second_2', 'e', 'f']
>>> cat_collapse(c, mapping, group_other=True)
['first_2', 'first_2', 'second_2', 'second_2', 'other', 'other']
Categories (3, object): ['first_2', 'second_2', 'other']
Collapsing preserves the order
>>> cat_rev(c)
['a', 'b', 'c', 'd', 'e', 'f']
Categories (6, object): ['f', 'e', 'd', 'c', 'b', 'a']
>>> cat_collapse(cat_rev(c), mapping)
['first_2', 'first_2', 'second_2', 'second_2', 'e', 'f']
Categories (4, object): ['f', 'e', 'second_2', 'first_2']
>>> mapping = {'other': ['a', 'b'], 'another': ['c', 'd']}
>>> cat_collapse(c, mapping, group_other=True)
['other', 'other', 'another', 'another', 'other2', 'other2']
Categories (3, object): ['other', 'another', 'other2']
"""
def make_other_name():
"""
Generate unique name for the other category
"""
if 'other' not in mapping:
return 'other'
for i in range(2, len(mapping)+2):
other = 'other' + str(i)
if other not in mapping:
return other
c = as_categorical(c)
if group_other:
mapping = mapping.copy()
other = make_other_name()
mapped_categories = list(chain(*mapping.values()))
unmapped_categories = c.categories.difference(mapped_categories)
mapping[other] = list(unmapped_categories)
inverted_mapping = {
cat: new_cat
for new_cat, old_cats in mapping.items()
for cat in old_cats
}
# Convert old categories to new values in order and remove
# any duplicates. The preserves the order
new_cats = pd.unique([
inverted_mapping.get(x, x)
for x in c.categories
])
c = pd.Categorical(
[inverted_mapping.get(x, x) for x in c],
categories=new_cats,
ordered=c.ordered
)
return c
def cat_other(c, keep=None, drop=None, other_category='other'):
"""
Replace categories with 'other'
Parameters
----------
c : list-like
Values that will make up the categorical.
keep : list-like
Categories to preserve. Only one of ``keep`` or ``drop``
should be specified.
drop : list-like
Categories to drop. Only one of ``keep`` or ``drop``
should be specified.
other_category : object
Value used for the 'other' values. It is placed at
the end of the categories.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'a', 'c', 'b', 'b', 'b', 'd', 'c']
>>> cat_other(c, keep=['a', 'b'])
['a', 'b', 'a', 'other', 'b', 'b', 'b', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
>>> cat_other(c, drop=['a', 'b'])
['other', 'other', 'other', 'c', 'other', 'other', 'other', 'd', 'c']
Categories (3, object): ['c', 'd', 'other']
>>> cat_other(pd.Categorical(c, ordered=True), drop=['a', 'b'])
['other', 'other', 'other', 'c', 'other', 'other', 'other', 'd', 'c']
Categories (3, object): ['c' < 'd' < 'other']
"""
if keep is None and drop is None:
raise ValueError(
"Missing categories to `keep` or those to `drop`."
)
elif keep is not None and drop is not None:
raise ValueError(
"Only one of `keep` or `drop` should be given."
)
c = as_categorical(c)
cats = c.categories
if keep is not None:
if not pdtypes.is_list_like(keep):
keep = [keep]
elif drop is not None:
if not pdtypes.is_list_like(drop):
drop = [drop]
keep = cats.difference(drop)
inverted_mapping = {
cat: other_category
for cat in cats.difference(keep)
}
inverted_mapping.update({x: x for x in keep})
new_cats = cats.intersection(keep).to_list() + [other_category]
c = pd.Categorical(
[inverted_mapping.get(x, x) for x in c],
categories=new_cats,
ordered=c.ordered
)
return c
def _lump(lump_it, c, other_category):
"""
Return a categorical of lumped
Helper for cat_lump_* functions
Parameters
----------
lump_it : sequence[(obj, bool)]
Sequence of (category, lump_category)
c : cateorical
Original categorical.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Returns
-------
out : categorical
Values
"""
lookup = {
cat: other_category if lump else cat
for cat, lump in lump_it
}
new_cats = (
c.categories
.intersection(lookup.values())
.insert(len(c), other_category)
)
c = pd.Categorical(
[lookup[value] for value in c],
categories=new_cats,
ordered=c.ordered
)
return c
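# Illustrative call (hypothetical categorical `c`): lump categories 'a' and 'b'
# into 'other' while keeping 'c'.
#   _lump([('a', True), ('b', True), ('c', False)], c, 'other')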
def cat_lump(
c,
n=None,
prop=None,
w=None,
other_category='other',
ties_method='min'
):
"""
Lump together least or most common categories
This is a general method that calls one of
:func:`~plydata.cat_tools.cat_lump_n`
:func:`~plydata.cat_tools.cat_lump_prop` or
:func:`~plydata.cat_tools.cat_lump_lowfreq`
depending on the parameters.
Parameters
----------
c : list-like
Values that will make up the categorical.
n : int (optional)
Number of most/least common values to preserve (not lumped
together). Positive ``n`` preserves the most common,
negative ``n`` preserves the least common.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
prop : float (optional)
Proportion above/below which the values of a category will be
preserved (not lumped together). Positive ``prop`` preserves
categories whose proportion of values is *more* than ``prop``.
Negative ``prop`` preserves categories whose proportion of
values is *less* than ``prop``.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
ties_method : {'min', 'max', 'average', 'first', 'dense'} (default: min)
How to treat categories that occur the same number of times
(i.e. ties):
* min: lowest rank in the group
* max: highest rank in the group
* average: average rank of the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
Examples
--------
>>> cat_lump(list('abbccc'))
['other', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'c', 'other']
When the least common categories put together are not smaller than the
next smallest group, nothing is lumped.
>>> cat_lump(list('abcddd'))
['a', 'b', 'c', 'd', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump(list('abcdddd'))
['other', 'other', 'other', 'd', 'd', 'd', 'd']
Categories (2, object): ['d', 'other']
>>> c = pd.Categorical(list('abccdd'))
>>> cat_lump(c, n=1)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump(c, n=2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
``n`` Least common categories
>>> cat_lump(c, n=-2)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
There are fewer than ``n`` categories that are the most/least common.
>>> cat_lump(c, n=3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump(c, n=-3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
By proportions, categories that make up *more* than ``prop`` fraction
of the items.
>>> cat_lump(c, prop=1/3.01)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump(c, prop=-1/3.01)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
>>> cat_lump(c, prop=1/2)
['other', 'other', 'other', 'other', 'other', 'other']
Categories (1, object): ['other']
Order of categoricals is maintained
>>> c = pd.Categorical(
... list('abccdd'),
... categories=list('adcb'),
... ordered=True
... )
>>> cat_lump(c, n=2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['d' < 'c' < 'other']
**Weighted lumping**
>>> c = list('abcd')
>>> weights = [3, 2, 1, 1]
>>> cat_lump(c, n=2) # No lumping
['a', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump(c, n=2, w=weights)
['a', 'b', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
"""
if n is not None:
return cat_lump_n(c, n, w, other_category, ties_method)
elif prop is not None:
return cat_lump_prop(c, prop, w, other_category)
else:
return cat_lump_lowfreq(c, other_category)
def cat_lump_n(
c,
n,
w=None,
other_category='other',
ties_method='min'
):
"""
Lump together most/least common n categories
Parameters
----------
c : list-like
Values that will make up the categorical.
n : int
Number of most/least common values to preserve (not lumped
together). Positive ``n`` preserves the most common,
negative ``n`` preserves the least common.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
ties_method : {'min', 'max', 'average', 'first', 'dense'} (default: min)
How to treat categories that occur the same number of times
(i.e. ties):
* min: lowest rank in the group
* max: highest rank in the group
* average: average rank of the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
Examples
--------
>>> c = pd.Categorical(list('abccdd'))
>>> cat_lump_n(c, 1)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump_n(c, 2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
``n`` Least common categories
>>> cat_lump_n(c, -2)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
There are fewer than ``n`` categories that are the most/least common.
>>> cat_lump_n(c, 3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_n(c, -3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
Order of categoricals is maintained
>>> c = pd.Categorical(
... list('abccdd'),
... categories=list('adcb'),
... ordered=True
... )
>>> cat_lump_n(c, 2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['d' < 'c' < 'other']
**Weighted lumping**
>>> c = list('abcd')
>>> weights = [3, 2, 1, 1]
>>> cat_lump_n(c, n=2) # No lumping
['a', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_n(c, n=2, w=weights)
['a', 'b', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
if w is None:
counts = c.value_counts().sort_values(ascending=False)
else:
counts = (
pd.Series(w)
.groupby(c)
.apply(np.sum)
.sort_values(ascending=False)
)
if n < 0:
rank = counts.rank(method=ties_method)
n = -n
else:
rank = (-counts).rank(method=ties_method)
# No category ranks beyond n, so there is nothing to lump
if not (rank > n).any():
return c
lump_it = zip(rank.index, rank > n)
return _lump(lump_it, c, other_category)
def cat_lump_prop(
c,
prop,
w=None,
other_category='other',
):
"""
Lump together least or most common categories by proportion
Parameters
----------
c : list-like
Values that will make up the categorical.
prop : float
Proportion above/below which the values of a category will be
preserved (not lumped together). Positive ``prop`` preserves
categories whose proportion of values is *more* than ``prop``.
Negative ``prop`` preserves categories whose proportion of
values is *less* than ``prop``.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Examples
--------
By proportions, categories that make up *more* than ``prop`` fraction
of the items.
>>> c = pd.Categorical(list('abccdd'))
>>> cat_lump_prop(c, 1/3.01)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump_prop(c, -1/3.01)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
>>> cat_lump_prop(c, 1/2)
['other', 'other', 'other', 'other', 'other', 'other']
Categories (1, object): ['other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
if w is None:
counts = c.value_counts().sort_values(ascending=False)
total = len(c)
else:
counts = (
pd.Series(w)
.groupby(c)
.apply(np.sum)
.sort_values(ascending=False)
)
total = counts.sum()
# For each category find out whether to lump it or keep it
# Create a generator of the form ((cat, lump), ...)
props = counts / total
if prop < 0:
if not (props > -prop).any():
# No proportion more than target, so no lumping
# the most common
return c
else:
lump_it = zip(props.index, props > -prop)
else:
if not (props <= prop).any():
# No proportion less than target, so no lumping
# the least common
return c
else:
lump_it = zip(props.index, props <= prop)
return _lump(lump_it, c, other_category)
def cat_lump_lowfreq(
c,
other_category='other',
):
"""
Lump together least categories
Ensures that the "other" category is still the smallest.
Parameters
----------
c : list-like
Values that will make up the categorical.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Examples
--------
>>> cat_lump_lowfreq(list('abbccc'))
['other', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'c', 'other']
When the least common categories put together are not smaller than the
next smallest group, nothing is lumped.
>>> cat_lump_lowfreq(list('abcddd'))
['a', 'b', 'c', 'd', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_lowfreq(list('abcdddd'))
['other', 'other', 'other', 'd', 'd', 'd', 'd']
Categories (2, object): ['d', 'other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
# For each category find out whether to lump it or keep it
# Create a generator of the form ((cat, lump), ...)
counts = c.value_counts().sort_values(ascending=False)
if len(counts) == 1:
return c
unique_counts = pd.unique(counts)
smallest = unique_counts[-1]
next_smallest = unique_counts[-2]
smallest_counts = counts[counts == smallest]
smallest_total = smallest_counts.sum()
smallest_cats = smallest_counts.index
if not smallest_total < next_smallest:
return c
lump_it = (
(cat, True) if cat in smallest_cats else (cat, False)
for cat in counts.index
)
return _lump(lump_it, c, other_category)
def cat_lump_min(
c,
min,
w=None,
other_category='other',
):
"""
Lump categories, preserving those that appear at least ``min`` times
Parameters
----------
c : list-like
Values that will make up the categorical.
min : int
Minimum number of times a category must be represented to be
preserved.
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Examples
--------
>>> c = list('abccdd')
>>> cat_lump_min(c, min=1)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_min(c, min=2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
**Weighted Lumping**
>>> weights = [2, 2, .5, .5, 1, 1]
>>> cat_lump_min(c, min=2, w=weights)
['a', 'b', 'other', 'other', 'd', 'd']
Categories (4, object): ['a', 'b', 'd', 'other']
Unlike :func:`~plydata.cat_tools.cat_lump`, :func:`cat_lump_min`
can lump together and create a category larger than the preserved
categories.
>>> c = list('abxyzccdd')
>>> cat_lump_min(c, min=2)
['other', 'other', 'other', 'other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
if w is None:
counts = c.value_counts().sort_values(ascending=False)
else:
counts = (
pd.Series(w)
.groupby(c)
.apply(np.sum)
.sort_values(ascending=False)
)
if (counts >= min).all():
return c
lookup = {
cat: cat if freq >= min else other_category
for cat, freq in counts.items()
}
new_cats = (
c.categories
.intersection(lookup.values())
.insert(len(c), other_category)
)
c = pd.Categorical(
[lookup[value] for value in c],
categories=new_cats,
ordered=c.ordered
)
return c
def cat_rename(c, mapping=None, **kwargs):
"""
Change/rename categories manually
Parameters
----------
c : list-like
Values that will make up the categorical.
mapping : dict (optional)
Mapping of the form ``{old_name: new_name}`` for how to rename
the categories. Setting a value to ``None`` removes the category.
This arguments is useful if the old names are not valid
python parameters. Otherwise, ``kwargs`` can be used.
**kwargs : dict
Mapping to rename categories. Setting a value to ``None`` removes
the category.
Examples
--------
>>> c = list('abcd')
>>> cat_rename(c, a='A')
['A', 'b', 'c', 'd']
Categories (4, object): ['A', 'b', 'c', 'd']
>>> c = pd.Categorical(
... list('abcd'),
... categories=list('bacd'),
... ordered=True
... )
>>> cat_rename(c, b='B', d='D')
['a', 'B', 'c', 'D']
Categories (4, object): ['B' < 'a' < 'c' < 'D']
Remove categories by setting them to ``None``.
>>> cat_rename(c, b='B', d=None)
['a', 'B', 'c']
Categories (3, object): ['B' < 'a' < 'c']
"""
c = as_categorical(c)
if mapping is not None and len(kwargs):
raise ValueError("Use only one of `mapping` or the ``kwargs``.")
lookup = mapping or kwargs
if not lookup:
return c
# Remove categories set to None
remove = [
old
for old, new in lookup.items()
if new is None
]
if remove:
for cat in remove:
del lookup[cat]
c = c.remove_categories(remove).dropna()
# Separately change values (inplace) and the categories (using an
# array) old to the new names. Then reconcile the two lists.
categories = c.categories.to_numpy().copy()
c.add_categories(
pd.Index(lookup.values()).difference(c.categories),
inplace=True
)
for old, new in lookup.items():
if old not in c.categories:
raise IndexError("Unknown category '{}'.".format(old))
c[c == old] = new
categories[categories == old] = new
new_categories = pd.unique(categories)
c.remove_unused_categories(inplace=True)
c.set_categories(new_categories, inplace=True)
return c
def cat_relabel(c, func=None, *args, **kwargs):
"""
Change/rename categories and collapse as necessary
Parameters
----------
c : list-like
Values that will make up the categorical.
func : callable
Function to create the new name. The first argument to
the function will be a category to be renamed.
*args : tuple
Positional arguments passed to ``func``.
*kwargs : dict
Keyword arguments passed to ``func``.
Examples
--------
>>> c = list('abcde')
>>> cat_relabel(c, str.upper)
['A', 'B', 'C', 'D', 'E']
Categories (5, object): ['A', 'B', 'C', 'D', 'E']
>>> c = pd.Categorical([0, 1, 2, 1, 1, 0])
>>> def func(x):
... if x == 0:
... return 'low'
... elif x == 1:
... return 'mid'
... elif x == 2:
... return 'high'
>>> cat_relabel(c, func)
['low', 'mid', 'high', 'mid', 'mid', 'low']
Categories (3, object): ['low', 'mid', 'high']
When the function yields the same output for 2 or more
different categories, those categories are collapsed.
>>> def first(x):
... return x[0]
>>> c = pd.Categorical(['aA', 'bB', 'aC', 'dD'],
... categories=['bB', 'aA', 'dD', 'aC'],
... ordered=True
... )
>>> cat_relabel(c, first)
['a', 'b', 'a', 'd']
Categories (3, object): ['b' < 'a' < 'd']
"""
c = as_categorical(c)
new_categories = [func(x, *args, **kwargs) for x in c.categories]
new_categories_uniq = pd.unique(new_categories)
if len(new_categories_uniq) < len(c.categories):
# Collapse
lookup = dict(zip(c.categories, new_categories))
c = pd.Categorical(
[lookup[value] for value in c],
categories=new_categories_uniq,
ordered=c.ordered
)
else:
c.categories = new_categories
return c
def cat_expand(c, *args):
"""
Add additional categories to a categorical
Parameters
----------
c : list-like
Values that will make up the categorical.
*args : tuple
Categories to add.
Examples
--------
>>> cat_expand(list('abc'), 'd', 'e')
['a', 'b', 'c']
Categories (5, object): ['a', 'b', 'c', 'd', 'e']
>>> c = pd.Categorical(list('abcd'), ordered=True)
>>> cat_expand(c, 'e', 'f')
['a', 'b', 'c', 'd']
Categories (6, object): ['a' < 'b' < 'c' < 'd' < 'e' < 'f']
"""
c = as_categorical(c)
c.add_categories(
pd.Index(args).difference(c.categories),
inplace=True
)
return c
def cat_explicit_na(c, na_category='(missing)'):
"""
Give missing values an explicit category
Parameters
----------
c : list-like
Values that will make up the categorical.
na_category : object (default: '(missing)')
Category for missing values
Examples
--------
>>> c = pd.Categorical(
... ['a', 'b', None, 'c', None, 'd', 'd'],
... ordered=True
... )
>>> c
['a', 'b', NaN, 'c', NaN, 'd', 'd']
Categories (4, object): ['a' < 'b' < 'c' < 'd']
>>> cat_explicit_na(c)
['a', 'b', '(missing)', 'c', '(missing)', 'd', 'd']
Categories (5, object): ['a' < 'b' < 'c' < 'd' < '(missing)']
"""
c = as_categorical(c)
bool_idx = pd.isnull(c)
if any(bool_idx):
c.add_categories([na_category], inplace=True)
c[bool_idx] = na_category
return c
def cat_remove_unused(c, only=None):
"""
Remove unused categories
Parameters
----------
c : list-like
Values that will make up the categorical.
only : list-like (optional)
The categories to remove *if* they are empty. If not given,
all unused categories are dropped.
Examples
--------
>>> c = pd.Categorical(list('abcdd'), categories=list('bacdefg'))
>>> c
['a', 'b', 'c', 'd', 'd']
Categories (7, object): ['b', 'a', 'c', 'd', 'e', 'f', 'g']
>>> cat_remove_unused(c)
['a', 'b', 'c', 'd', 'd']
Categories (4, object): ['b', 'a', 'c', 'd']
>>> cat_remove_unused(c, only=['a', 'e', 'g'])
['a', 'b', 'c', 'd', 'd']
Categories (5, object): ['b', 'a', 'c', 'd', 'f']
"""
if not pdtypes.is_categorical_dtype(c):
# All categories are used
c = pd.Categorical(c)
return c
else:
c = c.copy()
if only is None:
only = c.categories
used_idx = pd.unique(c.codes)
used_categories = c.categories[used_idx]
c = c.remove_categories(
c.categories
.difference(used_categories)
.intersection(only)
)
return c
def cat_unify(cs, categories=None):
"""
Unify (union of all) the categories in a list of categoricals
Parameters
----------
cs : list-like
Categoricals
categories : list-like
Extra categories to apply to every categorical.
Examples
--------
>>> c1 = pd.Categorical(['a', 'b'], categories=list('abc'))
>>> c2 = pd.Categorical(['d', 'e'], categories=list('edf'))
>>> c1_new, c2_new = cat_unify([c1, c2])
>>> c1_new
['a', 'b']
Categories (6, object): ['a', 'b', 'c', 'e', 'd', 'f']
>>> c2_new
['d', 'e']
Categories (6, object): ['a', 'b', 'c', 'e', 'd', 'f']
>>> c1_new, c2_new = cat_unify([c1, c2], categories=['z', 'y'])
>>> c1_new
['a', 'b']
Categories (8, object): ['a', 'b', 'c', 'e', 'd', 'f', 'z', 'y']
>>> c2_new
['d', 'e']
Categories (8, object): ['a', 'b', 'c', 'e', 'd', 'f', 'z', 'y']
"""
cs = [as_categorical(c) for c in cs]
all_cats = list(chain(*(c.categories.to_list() for c in cs)))
if categories is None:
categories = pd.unique(all_cats)
else:
categories = pd.unique(all_cats + categories)
cs = [c.set_categories(categories) for c in cs]
return cs
def cat_concat(*args):
"""
Concatenate categoricals and combine the categories
Parameters
----------
*args : tuple
Categoricals to be concatenated
Examples
--------
>>> c1 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
>>> c2 = pd.Categorical(['d', 'a', 'c'])
>>> cat_concat(c1, c2)
['a', 'b', 'd', 'a', 'c']
Categories (4, object): ['b', 'a', 'c', 'd']
Notes
-----
The resulting category is not ordered.
"""
categories = pd.unique(list(chain(*(
c.categories if pdtypes.is_categorical_dtype(c) else c
for c in args
))))
cs = pd.Categorical(
list(chain(*(c for c in args))),
categories=categories
)
return cs
def cat_zip(*args, sep=':', keep_empty=False):
"""
Create a new categorical (zip style) combined from two or more
Parameters
----------
*args : tuple
Categoricals to be concatenated.
sep : str (default: ':')
Separator for the combined categories.
keep_empty : bool (default: False)
If ``True``, include all combinations of categories
even those without observations.
Examples
--------
>>> c1 = pd.Categorical(list('aba'))
>>> c2 = pd.Categorical(list('122'))
>>> cat_zip(c1, c2)
['a:1', 'b:2', 'a:2']
Categories (3, object): ['a:1', 'a:2', 'b:2']
>>> cat_zip(c1, c2, keep_empty=True)
['a:1', 'b:2', 'a:2']
Categories (4, object): ['a:1', 'a:2', 'b:1', 'b:2']
"""
values = [sep.join(items) for items in zip(*args)]
cs = [
        c if pdtypes.is_categorical_dtype(c)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# General imports
import numpy as np
import pandas as pd
import sys, gc, time, warnings, pickle, random, psutil
# custom imports
from multiprocessing import Pool # Multiprocess Runs
import warnings
warnings.filterwarnings('ignore')
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import tqdm
from sklearn.preprocessing import OrdinalEncoder, MinMaxScaler #StandardScaler
from keras_models import create_model17EN1EN2emb1, create_model17noEN1EN2, create_model17
import scipy.stats as stats
import json
with open('SETTINGS.json', 'r') as myfile:
datafile=myfile.read()
SETTINGS = json.loads(datafile)
data_path = SETTINGS['RAW_DATA_DIR']
PROCESSED_DATA_DIR = SETTINGS['PROCESSED_DATA_DIR']
MODELS_DIR = SETTINGS['MODELS_DIR']
LGBM_DATASETS_DIR = SETTINGS['LGBM_DATASETS_DIR']
SUBMISSION_DIR = SETTINGS['SUBMISSION_DIR']
###############################################################################
################################# LIGHTGBM ####################################
###############################################################################
########################### Helpers
###############################################################################
## Seeder
# :seed to make all processes deterministic # type: int
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
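# Illustrative usage (the seed value is an assumption, not from the source):
#   seed_everything(42)  # call once at startup so random/numpy draws repeat across runs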
## Multiprocess Runs
def df_parallelize_run(func, t_split):
num_cores = np.min([N_CORES,len(t_split)])
pool = Pool(num_cores)
df = pd.concat(pool.map(func, t_split), axis=1)
pool.close()
pool.join()
return df
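# Hypothetical usage sketch -- N_CORES and the feature function are assumptions here:
#   N_CORES = psutil.cpu_count()
#   lag_df = df_parallelize_run(make_lag_roll, [(28, 7), (28, 14), (28, 30)])
# Each element of t_split is handed to func in its own process and the resulting
# frames are concatenated column-wise.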
########################### Helper to load data by store ID
###############################################################################
# Read data
lag_features=[
'sales_lag_28', 'sales_lag_29', 'sales_lag_30', 'sales_lag_31',
'sales_lag_32', 'sales_lag_33', 'sales_lag_34', 'sales_lag_35',
'sales_lag_36', 'sales_lag_37', 'sales_lag_38',
'sales_lag_39', 'sales_lag_40', 'sales_lag_41', 'sales_lag_42',
'rolling_mean_7', 'rolling_mean_14',
'rolling_mean_30', 'rolling_std_30', 'rolling_mean_60',
'rolling_std_60', 'rolling_mean_180', 'rolling_std_180',
'rolling_mean_tmp_1_7', 'rolling_mean_tmp_1_14',
'rolling_mean_tmp_1_30', 'rolling_mean_tmp_1_60',
'rolling_mean_tmp_7_7', 'rolling_mean_tmp_7_14',
'rolling_mean_tmp_7_30', 'rolling_mean_tmp_7_60',
'rolling_mean_tmp_14_7', 'rolling_mean_tmp_14_14',
'rolling_mean_tmp_14_30', 'rolling_mean_tmp_14_60'
]
def get_data_by_store(store):
    # Read and concat basic features
df = pd.concat([pd.read_pickle(BASE),
pd.read_pickle(PRICE).iloc[:,2:],
pd.read_pickle(CALENDAR).iloc[:,2:]],
axis=1)
# Leave only relevant store
df = df[df['store_id']==store]
df2 = pd.read_pickle(MEAN_ENC)[mean_features]
df2 = df2[df2.index.isin(df.index)]
# df3 = pd.read_pickle(LAGS).iloc[:,3:]
df3 = pd.read_pickle(LAGS)[lag_features]
df3 = df3[df3.index.isin(df.index)]
    df = pd.concat([df, df2], axis=1)
import matplotlib.pyplot as plt
import numpy as np
import gdxpds
import pandas as pd
import glob
##files = glob.glob('../results/for-post/results_0*.gdx')
#files = [
# #'../results/Sensitivities/Int2x/results_Cint.gdx',
# #'../results/Sensitivities/Int2x/results_Dint.gdx',
# #'../results/Sensitivities/Int2x/results_Gint.gdx',
# #'../results/Sensitivities/Int2x/results_Hint.gdx'
# '../results/Sensitivities/Oil and gas prices/results_Aoil.gdx',
# '../results/Sensitivities/Oil and gas prices/results_Boil.gdx',
# '../results/Sensitivities/Oil and gas prices/results_Coil.gdx',
# '../results/Sensitivities/Oil and gas prices/results_Doil.gdx',
# '../results/Sensitivities/Oil and gas prices/results_Eoil.gdx',
# '../results/Sensitivities/Oil and gas prices/results_Foil.gdx',
# '../results/Sensitivities/Oil and gas prices/results_Goil.gdx',
# '../results/Sensitivities/Oil and gas prices/results_Hoil.gdx'
#]
print('-- Reading in files...')
files = [
'../results/MainScenarios/results_A.gdx',
'../results/MainScenarios/results_B.gdx',
'../results/MainScenarios/results_C.gdx',
'../results/MainScenarios/results_D.gdx',
'../results/MainScenarios/results_E.gdx',
'../results/MainScenarios/results_F.gdx',
'../results/MainScenarios/results_G.gdx',
'../results/MainScenarios/results_H.gdx',
#
'../results/Sensitivities/results_B30.gdx',
'../results/Sensitivities/results_D30.gdx',
'../results/Sensitivities/results_F30.gdx',
'../results/Sensitivities/results_H30.gdx',
'../results/Sensitivities/results_B90.gdx',
'../results/Sensitivities/results_D90.gdx',
'../results/Sensitivities/results_F90.gdx',
'../results/Sensitivities/results_H90.gdx',
#
'../results/Sensitivities//results_Cint.gdx',
'../results/Sensitivities//results_Dint.gdx',
'../results/Sensitivities//results_Gint.gdx',
'../results/Sensitivities//results_Hint.gdx',
#
'../results/Sensitivities/results_Aoil.gdx',
'../results/Sensitivities/results_Boil.gdx',
'../results/Sensitivities/results_Coil.gdx',
'../results/Sensitivities/results_Doil.gdx',
'../results/Sensitivities/results_Eoil.gdx',
'../results/Sensitivities/results_Foil.gdx',
'../results/Sensitivities/results_Goil.gdx',
'../results/Sensitivities/results_Hoil.gdx',
#
'../results/Sensitivities/results_Are.gdx',
'../results/Sensitivities/results_Bre.gdx',
'../results/Sensitivities/results_Cre.gdx',
'../results/Sensitivities/results_Dre.gdx',
'../results/Sensitivities/results_Ere.gdx',
'../results/Sensitivities/results_Fre.gdx',
'../results/Sensitivities/results_Gre.gdx',
'../results/Sensitivities/results_Hre.gdx',
]
#scenarios = ['A','B','C','D','E','F','G','H']
#scenarios = ['B30','B90','D30','D90','F30','F90','H30','H90']
#scenarios = ['Cint','Dint','Gint','Hint']
#scenarios = ['Are','Bre','Cre','Dre','Ere','Fre','Gre','Hre']
#scenarios = ['Aoil','Boil','Coil','Doil','Eoil','Foil','Goil','Hoil']
scenarios = [
'A','B','C','D','E','F','G','H',
'B30','B90','D30','D90','F30','F90','H30','H90',
'Cint','Dint','Gint','Hint',
'Are','Bre','Cre','Dre','Ere','Fre','Gre','Hre',
'Aoil','Boil','Coil','Doil','Eoil','Foil','Goil','Hoil'
]
countries = ['bah','kuw','omn','qat','ksa','uae']
_fuel_cons_list = []
_ELWAbld_xls_list = []
_ELWAcap_list = []
_ELWAsupELp_xls_list = []
_WAcapSingle_list = []
_RWELtrade_tot_list = []
_RWEMallquant_list = []
_Invest_list = []
_ELtrans_list = []
_RWELtrade_tot_dict ={}
years = {'t01':'2015',
't02':'2016',
't03':'2017',
't04':'2018',
't05':'2019',
't06':'2020',
't07':'2021',
't08':'2022',
't09':'2023',
't10':'2024',
't11':'2025',
't12':'2026',
't13':'2027',
't14':'2028',
't15':'2029',
't16':'2030'}
fuels = {'methane':'NG',
'arablight':'oil'}
print('-- Creating dataframes...')
for filename, scenario in zip(files, scenarios):
_dataframes = gdxpds.to_dataframes(filename)
# fuel consumption
_fuel_cons = _dataframes['ELWAfcon_xls']
_fuel_cons.columns = ['c','trun','f','value']
_fuel_cons['scenario'] = scenario
#_fuel_cons = _fuel_cons.set_index(['scenario','c','trun','f'])
#_fuel_cons = _fuel_cons.pivot_table(values='Value',index=['scenario','c'],columns='f')
_fuel_cons = _fuel_cons.replace(years)
_fuel_cons = _fuel_cons.replace(fuels)
_fuel_cons['value'] = _fuel_cons['value']/1000
_fuel_cons_list.append(_fuel_cons)
# Technology builds
_ELWAbld_xls = _dataframes['ELWAbld_xls']
_ELWAbld_xls.columns = ['c','trun','ELp','value']
_ELWAbld_xls['scenario'] = scenario
_ELWAbld_xls = _ELWAbld_xls.replace(years)
_ELWAbld_xls = _ELWAbld_xls.replace(fuels)
_ELWAbld_xls_list.append(_ELWAbld_xls)
# Installed Capacity
_ELWAcap = _dataframes['ELWAcap']
_ELWAcap.columns = ['trun','c','ELp','sector','value']
_ELWAcap = _ELWAcap.drop(columns='sector')
_ELWAcap['scenario'] = scenario
_ELWAcap = _ELWAcap.replace(years)
_ELWAcap = _ELWAcap.replace(fuels)
#_ELWAcap = _ELWAcap.pivot_table(values='value',index=['scenario','c','trun'],columns='ELp')
_ELWAcap_list.append(_ELWAcap)
## Installed Capacity that works
#_ELWAcap = _dataframes['ELWAcap_xls']
#_ELWAcap.columns = ['c','trun','ELp','value']
#_ELWAcap['scenario'] = scenario
#_ELWAcap = _ELWAcap.replace(years)
#_ELWAcap = _ELWAcap.replace(fuels)
##_ELWAcap = _ELWAcap.pivot_table(values='value',index=['scenario','c','trun'],columns='ELp')
#_ELWAcap_list.append(_ELWAcap)
# Electricity Generation
_ELWAsupELp_xls = _dataframes['ELWAsupELp_xls']
_ELWAsupELp_xls.columns = ['trun','c','ELp','value']
_ELWAsupELp_xls['scenario'] = scenario
_ELWAsupELp_xls = _ELWAsupELp_xls.replace(years)
_ELWAsupELp_xls = _ELWAsupELp_xls.replace(fuels)
#_ELWAsupELp_xls = _ELWAsupELp_xls.pivot_table(values='value',index=['scenario','c','trun'],columns='ELp')
_ELWAsupELp_xls_list.append(_ELWAsupELp_xls)
# Water capacity
_WAcapSingle = _dataframes['WAcapSingle']
_WAcapSingle.columns = ['trun','c','WAp','sector','value']
_WAcapSingle = _WAcapSingle.drop(columns='sector')
_WAcapSingle['scenario'] = scenario
_WAcapSingle = _WAcapSingle.replace(years)
_WAcapSingle = _WAcapSingle.replace(fuels)
_WAcapSingle_list.append(_WAcapSingle)
    # Electricity trade
_RWELtrade_tot = _dataframes['RWELtrade_tot']
_RWELtrade_tot.columns = ['trun','c','cc','value']
_RWELtrade_tot['scenario'] = scenario
_RWELtrade_tot = _RWELtrade_tot.replace(years)
#_RWELtrade_tot = _RWELtrade_tot.set_index(['scenario','c','trun','cc'])
#_RWELtrade_tot = _RWELtrade_tot.pivot_table(values='value',index=['scenario','c'],columns='cc')
#_RWELtrade_tot_dict[scenario] = _RWELtrade_tot
_RWELtrade_tot_list.append(_RWELtrade_tot)
# CO2 emissions
_RWEMallquant = _dataframes['RWEMallquant']
_RWEMallquant.columns = ['trun','c','value']
_RWEMallquant['scenario'] = scenario
_RWEMallquant = _RWEMallquant.replace(years)
_RWEMallquant_list.append(_RWEMallquant)
# Investments
_Invest = _dataframes['Invest']
_Invest.columns = ['trun','tech','sector','value']
_Invest['scenario'] = scenario
_Invest = _Invest.replace(years)
_Invest['value'] = _Invest['value']/1000
_Invest_list.append(_Invest)
_ELtrans = _dataframes['RWELtrans_tot']
_ELtrans.columns = ['trun','r','c','rr','cc','value']
_ELtrans['scenario'] = scenario
_ELtrans = _ELtrans.replace(years)
_ELtrans_list.append(_ELtrans)
fuel_cons_df = pd.concat(_fuel_cons_list)
#fuel_cons_df.info()
ELWAbld_xls_df = pd.concat(_ELWAbld_xls_list)
#ELWAbld_xls_df.info()
_ELWAcap_df = pd.concat(_ELWAcap_list)
#_ELWAcap_df.info()
_ELWAsupELp_xls_df = pd.concat(_ELWAsupELp_xls_list)
#_ELWAsupELp_xls_df.info()
_WAcapSingle_df = pd.concat(_WAcapSingle_list)
_RWELtrade_tot_df = pd.concat(_RWELtrade_tot_list)
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
frame = DataFrame(data, columns=['year', 'state', 'pop'])
frame2 = DataFrame(data, columns=['year', 'state', 'pop'], index=['one', 'two', 'three', 'four', 'five'])
# frame2['debt'] = np.arange(5.)
val = Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
frame2['debt'] = val
frame2['eastern'] = frame2['state'] == 'Ohio'
del frame2['eastern']
# frame2 = frame2.drop('eastern', axis=1)  # non-inplace alternative to the del above
pop = {'Nevada': {2001: 2.4, 2002: 2.9},
'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
frame3 = DataFrame(pop)
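# For reference, a nested dict of dicts is interpreted with the outer keys as
# columns and the inner keys as the row index, roughly:
#         Nevada  Ohio
#   2000     NaN   1.5
#   2001     2.4   1.7
#   2002     2.9   3.6
# (row order can differ between pandas versions)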
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
import mock
import unittest
import pandas
import datalab.stackdriver.commands._monitoring as monitoring_commands
PROJECT = 'my-project'
class TestCases(unittest.TestCase):
@mock.patch('datalab.stackdriver.commands._monitoring._render_dataframe')
@mock.patch('datalab.stackdriver.monitoring.MetricDescriptors')
def test_list_metric_descriptors(self, mock_metric_descriptors, mock_render_dataframe):
METRIC_TYPES = ['compute.googleapis.com/instances/cpu/utilization',
'compute.googleapis.com/instances/cpu/usage_time']
DATAFRAME = pandas.DataFrame(METRIC_TYPES, columns=['Metric type'])
PATTERN = 'compute*cpu*'
mock_metric_class = mock_metric_descriptors.return_value
mock_metric_class.as_dataframe.return_value = DATAFRAME
monitoring_commands._list_metric_descriptors(
{'project': PROJECT, 'type': PATTERN}, None)
mock_metric_descriptors.assert_called_once_with(project_id=PROJECT)
mock_metric_class.as_dataframe.assert_called_once_with(pattern=PATTERN)
mock_render_dataframe.assert_called_once_with(DATAFRAME)
@mock.patch('datalab.stackdriver.commands._monitoring._render_dataframe')
@mock.patch('datalab.stackdriver.monitoring.ResourceDescriptors')
def test_list_resource_descriptors(self, mock_resource_descriptors, mock_render_dataframe):
RESOURCE_TYPES = ['gce_instance', 'aws_ec2_instance']
DATAFRAME = pandas.DataFrame(RESOURCE_TYPES, columns=['Resource type'])
PATTERN = '*instance*'
mock_resource_class = mock_resource_descriptors.return_value
mock_resource_class.as_dataframe.return_value = DATAFRAME
monitoring_commands._list_resource_descriptors(
{'project': PROJECT, 'type': PATTERN}, None)
mock_resource_descriptors.assert_called_once_with(project_id=PROJECT)
mock_resource_class.as_dataframe.assert_called_once_with(pattern=PATTERN)
mock_render_dataframe.assert_called_once_with(DATAFRAME)
@mock.patch('datalab.stackdriver.commands._monitoring._render_dataframe')
@mock.patch('datalab.stackdriver.monitoring.Groups')
def test_list_groups(self, mock_groups, mock_render_dataframe):
GROUP_IDS = ['GROUP-205', 'GROUP-101']
        DATAFRAME = pandas.DataFrame(GROUP_IDS, columns=['Group ID'])
from configparser import ConfigParser
import os
import pandas as pd
import shutil
import time
import zipsender_utils
from zipsender_utils import add_path_script_folders
list_folders_name = ['zipind', 'Telegram_filesender']
add_path_script_folders(list_folders_name)
import telegram_filesender, utils_filesender, zipind_utils
import api_telegram
def gen_data_frame(path_folder):
list_data = []
for root, dirs, files in os.walk(path_folder):
for file in files:
d = {}
file_path = os.path.join(root, file)
d['file_output'] = file_path
d['description'] = file
list_data.append(d)
df = pd.DataFrame(list_data)
list_columns = ['file_output', 'description']
df = df.reindex(list_columns, axis=1)
return df
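# Illustrative call (the path is an assumption): gen_data_frame('to_upload/project_x/output')
# returns one row per file found under the folder, with its full path in
# 'file_output' and the bare file name in 'description'.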
def create_description_report(folder_toupload, folder_project_name):
path_dir_project = os.path.join(folder_toupload, folder_project_name)
path_dir_project_output = os.path.join(path_dir_project, 'output')
path_description = os.path.join(path_dir_project, 'descriptions.xlsx')
df = gen_data_frame(os.path.abspath(path_dir_project_output))
df.to_excel(path_description, index=False)
folder_path_description = os.path.join(folder_toupload,
folder_project_name)
return folder_path_description
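# Illustrative call (folder names are assumptions):
#   create_description_report('to_upload', 'project_x')
# writes descriptions.xlsx inside the project folder (next to its 'output'
# subfolder) and returns that project folder path.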
def get_personalize_description(list_dict_description,
list_str_part,
custom_description):
first_file_description = list_dict_description[0]['description']
# set count_parts
last_file_description = list_dict_description[-1]['description']
count_parts_raw = last_file_description.split('-')[-1]
count_parts_raw2 = int(count_parts_raw.split('.')[0])
count_parts = f'{count_parts_raw2:02}'
# set str_part
if count_parts_raw2 == 1:
str_part = list_str_part[0]
else:
str_part = list_str_part[1]
d_keys = {'count_parts': count_parts,
'str_part': str_part}
template_content = zipsender_utils.get_txt_content(custom_description)
description_bottom = zipsender_utils.compile_template(d_keys,
template_content)
description_personalized = (first_file_description + '\n' +
description_bottom)
return description_personalized
def update_descriptions(list_dict_description,
description_personalized,
log_folder_path,
folder_project_name,
title_log_file_list):
# Add project file structure
folder_project_name = folder_project_name.strip('_')
path_file_log = os.path.join(log_folder_path, folder_project_name + '.txt')
log_description = f'{folder_project_name}\n{title_log_file_list}'
dict_log = {'file_output': path_file_log, 'description': log_description}
list_dict_log = [dict_log]
# personalize first file description
list_dict_description[0]['description'] = description_personalized
list_dict_description_updated = list_dict_log + list_dict_description
return list_dict_description_updated
def get_list_dict_sent_doc(return_send_files):
list_dict_sent_doc = []
for message_file in return_send_files:
dict_sent_doc = {}
dict_sent_doc['caption'] = message_file['caption']
message_id = int(message_file['message_id'])
dict_sent_doc['message_id'] = message_id
chat_id = int(message_file['chat']['id'])
return_message_data = api_telegram.get_messages(chat_id, [message_id])
dict_sent_doc['file_id'] = \
return_message_data[0]['document']["file_id"]
list_dict_sent_doc.append(dict_sent_doc)
return list_dict_sent_doc
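# Each returned entry has the shape {'caption': ..., 'message_id': int, 'file_id': str};
# the file_id comes from a follow-up api_telegram.get_messages lookup per message.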
def get_list_message_id(list_dict_sent_doc):
list_message_id = []
for dict_sent_doc in list_dict_sent_doc:
message_id = dict_sent_doc['message_id']
list_message_id.append(message_id)
return list_message_id
def send_files_mode_album_doc(dir_project_name,
list_dict_description,
chat_id,
chat_id_cache,
log_project_sent_folder_path):
return_send_files = api_telegram.send_files(list_dict_description,
chat_id_cache)
list_dict_sent_doc = get_list_dict_sent_doc(return_send_files)
# save 'sent files to cache group' metadata
name_file_sent_1 = (dir_project_name + '-report_sent_1' + '.csv')
path_file_sent_1 = os.path.join(log_project_sent_folder_path,
name_file_sent_1)
    df_files_metadata = pd.DataFrame(list_dict_sent_doc)
import pandas as pd
import numpy as np
from datetime import datetime, date, timedelta
import requests
import matplotlib.pyplot as plt
from countries import countries_dict
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import difflib
def countries ():
date_url = date.today().strftime('%m-%d-%Y')
inputValid, countries_valid = False, False
countries_in_scope = []
while inputValid == False:
url = r'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/' + date_url + '.csv'
feedback = requests.get(url)
if feedback.status_code != 200:
date_url = (date.today() - timedelta(days=1)).strftime('%m-%d-%Y')
continue
else:
inputValid = True
print(countries_dict.keys())
#countries in scope
while countries_valid == False:
country = input("Add a country or type 'continue' to move forward: ")
if country == 'continue':
countries_valid = True
else:
if country in countries_dict:
countries_in_scope.append(country)
print(f"{country} have been added.")
continue
else:
print("Invalid input.")
right_country = ''.join(difflib.get_close_matches(country, countries_dict, n = 1))
print(f"Did you mean: {right_country}? {right_country} has been added")
countries_in_scope.append(right_country)
print("\n")
print(f"Countries that will be displayed: {', '.join(countries_in_scope)}")
#Total Cases
df = pd.read_csv(url)
table_country = pd.pivot_table(df, values = ['Confirmed', 'Deaths'], index = 'Country_Region', aggfunc = np.sum, margins = True)
table_country = table_country[table_country.index.isin(countries_in_scope)]
#Daily Cases
daily_cases_url = r'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
df_daily_cases = pd.read_csv(daily_cases_url)
countries_daily_cases = pd.pivot_table(df_daily_cases, index = 'Country/Region', aggfunc = np.sum, margins=True)
#get the first avilable date (1/22/20)
dates_list = list(df_daily_cases)[4:]
countries_plot_cases = pd.pivot_table(df_daily_cases, values = [dates_list[0]], index = 'Country/Region', aggfunc = np.sum)
#loop through the dates
for i in range(1, len(dates_list)):
append_df = pd.pivot_table(df_daily_cases, values = [dates_list[i]], index = 'Country/Region', aggfunc = np.sum)
countries_plot_cases = pd.concat([countries_plot_cases, append_df], axis=1)
#get the daily increases
countries_plot_daily_cases = pd.pivot_table(df_daily_cases, values = [dates_list[0]], index = 'Country/Region', aggfunc = np.sum)
for i in range(1, len(dates_list)):
countries_plot_daily_cases[dates_list[i]] = countries_plot_cases[dates_list[i]] - countries_plot_cases[dates_list[i - 1]]
countries_plot_cases = countries_plot_cases[countries_plot_cases.index.isin(countries_in_scope)].T
countries_plot_daily_cases = countries_plot_daily_cases[countries_plot_daily_cases.index.isin(countries_in_scope)].T
#Daily Deaths
daily_deaths_url = r'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
    df_daily_deaths = pd.read_csv(daily_deaths_url)