| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
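Each row below is one record: a code prompt truncated just before a pandas call, the call itself as the completion, and the fully qualified API name. Spelled out with the first row's own values, a record looks like this:

```python
# One record of this dataset, written out as a plain dict; the values are taken
# from the first row below (the prompt is abbreviated to its final line).
record = {
    "prompt": "... def test_most_similar_raise_with_series(self):\n        s_embed = ",
    "completion": 'pd.Series({"one": 1})',   # the call that completes the prompt
    "api": "pandas.Series",                  # fully qualified name of the completed API
}
```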
import pandas as pd
from texthero import representation
from texthero import preprocessing
from . import PandasTestCase
import doctest
import unittest
import string
"""
Test doctest
"""
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(representation))
return tests
class TestRepresentation(PandasTestCase):
"""
Term Frequency.
"""
def test_term_frequency_single_document(self):
s = pd.Series("a b c c")
s_true = pd.Series([[1, 1, 2]])
self.assertEqual(representation.term_frequency(s), s_true)
def test_term_frequency_multiple_documents(self):
s = pd.Series(["doc_one", "doc_two"])
s_true = pd.Series([[1, 0], [0, 1]])
self.assertEqual(representation.term_frequency(s), s_true)
def test_term_frequency_not_lowercase(self):
s = pd.Series(["one ONE"])
s_true = pd.Series([[1, 1]])
self.assertEqual(representation.term_frequency(s), s_true)
def test_term_frequency_punctuation_are_kept(self):
s = pd.Series(["one !"])
s_true = pd.Series([[1, 1]])
self.assertEqual(representation.term_frequency(s), s_true)
"""
TF-IDF
"""
def test_idf_single_document(self):
s = pd.Series("a")
s_true = pd.Series([[1]])
self.assertEqual(representation.tfidf(s), s_true)
def test_idf_single_not_lowercase(self):
tfidf_single_smooth = 0.7071067811865475 # TODO
s = pd.Series("ONE one")
s_true = pd.Series([[tfidf_single_smooth, tfidf_single_smooth]])
self.assertEqual(representation.tfidf(s), s_true)
"""
Word2Vec
"""
def test_word2vec(self):
s = pd.Series(["today is a beautiful day", "today is not that beautiful"])
df_true = pd.DataFrame(
[[0.0] * 300] * 7,
index=["a", "beautiful", "day", "is", "not", "that", "today"],
)
s = preprocessing.tokenize(s)
df_embedding = representation.word2vec(s, min_count=1, seed=1)
self.assertEqual(type(df_embedding), pd.DataFrame)
self.assertEqual(df_embedding.shape, df_true.shape)
def test_most_similar_simple(self):
s = pd.Series(["one one one"])
s = preprocessing.tokenize(s)
df_embeddings = representation.word2vec(s, min_count=1, seed=1)
to = "one"
most_similar = representation.most_similar(df_embeddings, to)
self.assertEqual(most_similar.shape, (1,))
def test_most_similar_raise_with_series(self):
s_embed =
| pd.Series({"one": 1}) | pandas.Series |
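To make the expected behavior of the term-frequency tests above concrete, here is a toy recomputation of the single-document case in plain Python; it only mirrors what the tests assert and is not texthero's implementation.

```python
# Sketch: reproduce the expectation of test_term_frequency_single_document above.
# "a b c c" over the vocabulary [a, b, c] yields the count vector [1, 1, 2].
from collections import Counter

import pandas as pd

s = pd.Series(["a b c c"])
vocab = sorted({tok for doc in s for tok in doc.split()})
counts = s.apply(lambda doc: [Counter(doc.split())[t] for t in vocab])
print(list(counts))  # [[1, 1, 2]]
```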
import numpy as np
import pandas as pd
import datetime as dt
import pickle
import bz2
from .analyzer import summarize_returns
DATA_PATH = '../backtest/'
class Portfolio():
"""
Portfolio is the core class for event-driven backtesting. It conducts the
backtesting in the following order:
1. Initialization:
Set the capital base we invest and the securities we
want to trade.
2. Receive the price information with .receive_price():
Insert the new price information for each security so that the
Portfolio class can calculate and update the relevant status, such
as the portfolio value and position weights.
3. Rebalance with .rebalance():
Depending on the signal, we can choose to change the position
on each security.
4. Keep position with .keep_position():
If we don't rebalance the portfolio, we need to tell it to keep
the current position at the end of the market session.
Example
-------
see Vol_MA.ipynb, Vol_MA_test_robustness.ipynb
Parameters
----------
capital: numeric
capital base we put into the portfolio
inception: datetime.datetime
the time when we start backtesting
components: list of str
tickers of securities to trade, such as ['AAPL', 'MSFT', 'AMZN']
name: str
name of the portfolio
is_share_integer: boolean
If true, the shares of securities will be rounded to integers.
"""
def __init__(self, capital, inception, components,
name='portfolio', is_share_integer=False):
# -----------------------------------------------
# initialize parameters
# -----------------------------------------------
self.capital = capital # initial money invested
if isinstance(components, str):
components = [components] # should be list
self.components = components # equities in the portfolio
# self.commission_rate = commission_rate
self.inception = inception
self.component_prices =
| pd.DataFrame(columns=self.components) | pandas.DataFrame |
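Based only on the docstring above, here is a minimal usage sketch of the four-step workflow. Only the constructor parameters and the method names come from the docstring; the method arguments, `price_feed`, `signal()` and `target_weights` are assumptions, not the class's actual API.

```python
# Hypothetical usage sketch of the Portfolio workflow described in the docstring above.
import datetime as dt

price_feed = [(dt.datetime(2020, 1, 3), {'AAPL': 300.0, 'MSFT': 160.0})]  # placeholder market data
target_weights = {'AAPL': 0.5, 'MSFT': 0.5}                               # placeholder allocation

def signal(date):
    return True                                                           # placeholder rebalancing trigger

pf = Portfolio(capital=1_000_000,
               inception=dt.datetime(2020, 1, 2),
               components=['AAPL', 'MSFT'],
               name='demo',
               is_share_integer=True)

for date, prices in price_feed:
    pf.receive_price(prices)          # 2. insert the new price information
    if signal(date):
        pf.rebalance(target_weights)  # 3. change positions according to the signal
    else:
        pf.keep_position()            # 4. otherwise keep the current position
```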
import unittest
from unittest import mock
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas.util.testing import assert_frame_equal
import tests.test_data as td
from shift_detector.checks.statistical_checks import numerical_statistical_check, categorical_statistical_check
from shift_detector.checks.statistical_checks.categorical_statistical_check import CategoricalStatisticalCheck
from shift_detector.checks.statistical_checks.numerical_statistical_check import NumericalStatisticalCheck
from shift_detector.checks.statistical_checks.text_metadata_statistical_check import TextMetadataStatisticalCheck
from shift_detector.detector import Detector
from shift_detector.precalculations.store import Store
from shift_detector.precalculations.text_metadata import NumCharsMetadata, NumWordsMetadata, \
DistinctWordsRatioMetadata, LanguagePerParagraph, UnknownWordRatioMetadata, StopwordRatioMetadata, LanguageMetadata
from shift_detector.utils.visualization import PlotData
class TestTextMetadataStatisticalCheck(unittest.TestCase):
def setUp(self):
self.poems = td.poems
self.phrases = td.phrases
def test_significant_metadata(self):
pvalues = pd.DataFrame([[0.001, 0.2]], columns=['num_chars', 'distinct_words_ratio'], index=['pvalue'])
result = TextMetadataStatisticalCheck(significance=0.01).significant_metadata_names(pvalues)
self.assertIn('num_chars', result)
self.assertNotIn('distinct_words_ratio', result)
def test_not_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': list(reversed(self.poems))})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck().run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(0, len(result.shifted_columns))
self.assertEqual(0, len(result.explanation))
def test_significant(self):
df1 = pd.DataFrame.from_dict({'text': self.poems})
df2 = pd.DataFrame.from_dict({'text': self.phrases})
store = Store(df1, df2)
result = TextMetadataStatisticalCheck([NumCharsMetadata(), NumWordsMetadata(),
DistinctWordsRatioMetadata(), LanguagePerParagraph()]
).run(store)
self.assertEqual(1, len(result.examined_columns))
self.assertEqual(1, len(result.shifted_columns))
self.assertEqual(1, len(result.explanation))
def test_compliance_with_detector(self):
df1 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
df2 = pd.DataFrame.from_dict({'text': ['This is a very important text.',
'It contains information.', 'Brilliant ideas are written down.',
'Read it.', 'You will become a lot smarter.',
'Or you will waste your time.', 'Come on, figure it out!',
'Perhaps it will at least entertain you.', 'Do not be afraid.',
'Be brave!']})
detector = Detector(df1=df1, df2=df2, log_print=False)
detector.run(TextMetadataStatisticalCheck())
column_index = pd.MultiIndex.from_product([['text'], ['distinct_words', 'num_chars', 'num_words']],
names=['column', 'metadata'])
solution = pd.DataFrame([[1.0, 1.0, 1.0]], columns=column_index, index=['pvalue'])
self.assertEqual(1, len(detector.check_reports[0].examined_columns))
self.assertEqual(0, len(detector.check_reports[0].shifted_columns))
self.assertEqual(0, len(detector.check_reports[0].explanation))
assert_frame_equal(solution, detector.check_reports[0].information['test_results'])
def test_language_can_be_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], language='fr')
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertEqual('fr', mdtype.language)
def test_infer_language_is_set(self):
check = TextMetadataStatisticalCheck([UnknownWordRatioMetadata(), StopwordRatioMetadata()], infer_language=True)
md_with_lang = [mdtype for mdtype in check.metadata_precalculation.text_metadata_types
if type(mdtype) in [UnknownWordRatioMetadata, StopwordRatioMetadata]]
for mdtype in md_with_lang:
self.assertTrue(mdtype.infer_language)
def test_figure_function_is_collected(self):
df1 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
df2 = pd.DataFrame.from_dict({'text': ['blub'] * 10})
metadata_names = ['num_chars', 'num_words']
cols = pd.MultiIndex.from_product([df1.columns, metadata_names], names=['column', 'metadata'])
check = TextMetadataStatisticalCheck()
pvalues = pd.DataFrame(columns=cols, index=['pvalue'])
for solution, num_sig_metadata in [(1, 2), (1, 1), (0, 0)]:
p = [0.001] * num_sig_metadata + [0.05] * (2 - num_sig_metadata)
pvalues[('text', 'num_chars')] = p[0]
pvalues[('text', 'num_words')] = p[1]
with self.subTest(solution=solution, pvalues=pvalues):
result = check.metadata_figure(pvalues=pvalues, df1=df1, df2=df2)
self.assertEqual(solution, len(result))
@mock.patch('shift_detector.checks.statistical_checks.text_metadata_statistical_check.plt')
def test_all_plot_functions_are_called_and_plot_is_shown(self, mock_plt):
plot_data = [PlotData(MagicMock(), 1), PlotData(MagicMock(), 2), PlotData(MagicMock(), 3)]
TextMetadataStatisticalCheck.plot_all_metadata(plot_data)
mock_plt.figure.assert_called_with(figsize=(12.0, 30.0), tight_layout=True)
for func, rows in plot_data:
self.assertTrue(func.called)
mock_plt.show.assert_called_with()
def test_column_tuples_are_handled_by_numerical_visualization(self):
columns = ['text']
metadata_names = ['num_chars']
cols = pd.MultiIndex.from_product([columns, metadata_names], names=['column', 'metadata'])
df1 =
| pd.DataFrame(columns=cols) | pandas.DataFrame |
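The first test above fixes the selection rule: a metadata column counts as significant when its p-value falls below the configured significance level. A standalone sketch of that rule follows; the real logic lives in TextMetadataStatisticalCheck.significant_metadata_names, and this re-implementation is only illustrative.

```python
import pandas as pd

def significant_metadata_names_sketch(pvalues: pd.DataFrame, significance: float = 0.01):
    # Keep the metadata names whose p-value is below the significance threshold,
    # mirroring what test_significant_metadata asserts.
    return [name for name, p in pvalues.loc['pvalue'].items() if p < significance]

pvalues = pd.DataFrame([[0.001, 0.2]],
                       columns=['num_chars', 'distinct_words_ratio'],
                       index=['pvalue'])
print(significant_metadata_names_sketch(pvalues))  # ['num_chars']
```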
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path to the CSV file containing the time series data values
--outFile: Path to the INI configuration file where the time series data values are written
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean using a multiplicative (weighted-average) method, since direct division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation using a multiplicative method, since direct division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0) # accumulate the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans invalid data values (i.e. inf, NaN) out of the data frame.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from the server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1":
| pandas.StringDtype() | pandas.StringDtype |
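frameToINI above serializes each dataframe column as a bracketed, quoted list under a single section. A tiny illustration of the resulting INI shape, with a made-up section name and values (note that ConfigParser lowercases option names by default):

```python
# Illustration of the INI shape frameToINI produces (section and values are made up).
import configparser
import io

cfg = configparser.ConfigParser()
cfg.add_section('time-series')
cfg.set('time-series', 'Serial_Number', "['S1', 'S2']")
buf = io.StringIO()
cfg.write(buf)
print(buf.getvalue())
# [time-series]
# serial_number = ['S1', 'S2']
```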
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import itertools
import datetime
import matplotlib.colors as colors
import matplotlib.cm as cm
import os
import statistics
import pysolar
#-----------------------------------------------------------------------------
# Code motivation -------------------------------------------------------------
'Code for relating and analyzing the clear-sky index (Kt*) and the clearness index (Kt).'
'An analysis of their rate of change is also included to evaluate their variability, together with the'
'cloud cover fraction. This is done on the historical data because we want to analyze the variability.'
Theoric_Model = 'GIS' ##---> 'GIS' to use the GIS model or 'Piranometro' to use the pyranometer one
##############################################################################
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
##############################################################################
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################################################
##-------------------------------------READING THE PYRANOMETER DATA------------------------------------##
##########################################################################################################
df_pira_TS =
| pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018_2019.txt', parse_dates=[2]) | pandas.read_table |
import os
import bisect
import shutil
import argparse
import pandas as pd
from deepface import DeepFace
"""
Split by age range
Age bins: [0-8],[8-16],[16-24],[24-32],[32-40],[40-48],[48-56],[56-64],[64-]
Split by angle
Angle bins: [00, 22, 40, 55, 75]
pip install deepface
error: Support for codec 'lz4' not built
mamba uninstall pyarrow
mamba install -c anaconda lz4
pip install pyarrow
mamba install pandas
"""
age_range = [8, 16, 24, 32, 40, 48, 56, 64]
pose_range = ["00", "22", "40", "55", "75"]
record_file = "../data/unique_face_record.ftr"
record_class_file = "../data/face_class_record.ftr"
df_record = None
try:
df_record =
| pd.read_feather(record_file) | pandas.read_feather |
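The comment block above defines fixed age bins; a small sketch of how an age could be mapped onto those bins with the bisect module imported in the row above. The helper name, labels and example ages are illustrative, not the dataset's actual splitting code.

```python
import bisect

# Age bins from the comment block above: [0-8],[8-16],...,[56-64],[64-]
age_range = [8, 16, 24, 32, 40, 48, 56, 64]
bin_labels = ["0-8", "8-16", "16-24", "24-32", "32-40", "40-48", "48-56", "56-64", "64-"]

def age_bucket(age):
    # bisect_right counts the bin edges <= age, which is exactly the index of the
    # matching label in bin_labels.
    return bin_labels[bisect.bisect_right(age_range, age)]

print(age_bucket(30))  # 24-32
print(age_bucket(70))  # 64-
```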
import gc
import warnings
import numpy as np
import pandas as pd
warnings.simplefilter(action='ignore', category=FutureWarning)
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df =
| pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category) | pandas.get_dummies |
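For reference, a tiny standalone example of what the completed pd.get_dummies call does; the toy frame and column names here are illustrative only.

```python
import pandas as pd

# Toy frame with one categorical column and a missing value.
df = pd.DataFrame({'color': ['red', 'blue', None], 'x': [1, 2, 3]})
encoded = pd.get_dummies(df, columns=['color'], dummy_na=True)
print(encoded.columns.tolist())
# ['x', 'color_blue', 'color_red', 'color_nan']
```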
import pandas as pd
import os
import subprocess
from pathlib import Path
import numpy as np
import datetime
import json
def summary_of_files():
job_list = ["oct20_aa_fix1", "oct20_aa_fix2", "oct20_aa_fix3", "oct20_group_b","oct20_sub_0","oct20_sub_1", "oct20_sub_2", "oct20_sub_3", "oct20_sub_4","oct20_sub_5","oct20_sub_6","oct20_sub_7","oct20_sub_8",]
summary_df = pd.DataFrame()
c = 0
for job in job_list:
df = pd.read_csv(os.path.join("node_encode", job + ".csv"))
for index, row in df.iterrows():
# check hdf5 folder for sta.csv and sta.hdf5
flags = {
"sta": row.sta,
"job_name":row.job_name,
"sac_csv": os.path.exists(os.path.join(row.hdf5_folder, row.sta + ".csv")),
"sac_hdf5": os.path.exists(os.path.join(row.hdf5_folder, row.sta + ".hdf5")),
"prediction_made": os.path.exists(row.prediction_output_folder),
"merge_filtered": os.path.exists(os.path.join(row.merge_output_folder, "merge_filtered.csv")),
"merge_filtered_snr": os.path.exists(os.path.join(row.merge_output_folder, "merge_filtered_snr.csv")),
"merge_filtered_snr_customfilter": os.path.exists(os.path.join(row.merge_output_folder, "merge_filtered_snr_customfilter.csv")),
"n_lines": 0,
"sac_pick_files":0,
}
if flags["merge_filtered_snr_customfilter"]:
flags["n_lines"] = int(subprocess.check_output(["wc", "-l", os.path.join(row.merge_output_folder, "merge_filtered_snr_customfilter.csv")]).decode("utf8").split()[0]) - 1
flags["sac_pick_files"] = len(os.listdir(os.path.join(row.merge_output_folder, "sac_picks")))
checks = {
"non_zero_files": flags["sac_csv"] and flags["sac_hdf5"],
"all_plotted": flags["n_lines"] * 4 == flags["sac_pick_files"],
}
for k,v in flags.items():
summary_df.at[c, k] = v
for k,v in checks.items():
summary_df.at[c, k] = v
c += 1
summary_df.to_csv("oct20_summary.csv", index = False)
def infer_actual_uptime():
# read through all the generated .csv files and parse them
with open("station/all_stations.txt", "r") as f:
station_list = [line.strip() for line in f if line.strip() != ""]
station_dict = {station: {} for station in station_list}
for index, row in df.iterrows():
# open the .csv file with aLL the trace names
csv_path = os.path.join(row.hdf5_folder, row.sta + ".csv")
if os.path.exists(csv_path):
_df = pd.read_csv(csv_path)
_df["hours"] = _df["start_time"].str.slice(stop = 13)
unique_hours = _df["hours"].unique()
for dayhr in unique_hours:
day = datetime.datetime.strftime(datetime.datetime.strptime(dayhr[:10], "%Y-%m-%d"), "%j")
hr = dayhr[-2:]
if day not in station_dict[row.sta]:
station_dict[row.sta][day] = [hr]
else:
station_dict[row.sta][day].append(hr)
with open('08jul_aceh.json', 'w') as f:
json.dump(station_dict,f)
#print(station_dict)
# then summarise findings
df_list = []
summary_df =
| pd.DataFrame() | pandas.DataFrame |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data =
| pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100) | pandas.DataFrame |
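The expected values in test_rebalance_with_commissions above follow from simple bookkeeping: prices are 100 at dts[0], the commission is a flat 1 per trade, and the test asserts a position of 9 shares. A short recomputation of the asserted numbers:

```python
# Recompute the values asserted in test_rebalance_with_commissions above.
capital, price, commission = 1000, 100, 1

# First rebalance: all weight on c1 -> buy 9 shares (900) plus 1 commission.
shares = 9
cash = capital - shares * price - commission      # 99
assert cash + shares * price == 999               # s.value == 999, s.capital == 99

# Second rebalance: sell 9 c1 (+900, -1 commission), buy 9 c2 (-900, -1 commission).
cash = cash + shares * price - commission         # 998
cash = cash - shares * price - commission         # 97
assert cash + shares * price == 997               # s.value == 997, s.capital == 97
```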
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188:
|
pd.Timestamp("2012-11-06 00:00:00")
|
pandas.Timestamp
|
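The forecast fixture above spells out every value as a literal. Since the "fcst" column grows by a constant step and both bounds are infinite, an equivalent frame can be generated programmatically. A minimal sketch, assuming the linear spacing implied by the listed endpoints (the floats may differ in the last few decimal places):
import numpy as np
import pandas as pd
# Daily index matching the fixture: 2012-05-02 through 2013-05-30, 394 rows (keys 0..393).
time = pd.date_range("2012-05-02", periods=394, freq="D")
# Linearly spaced forecast with unbounded confidence limits, as in the literal above.
fcst_linear = pd.DataFrame(
    {
        "time": time,
        "fcst": np.linspace(8.348604308646497, 8.490063167093771, num=394),
        "fcst_lower": -np.inf,
        "fcst_upper": np.inf,
    }
)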
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(
|
Timestamp('2013-09-01 00:00:00')
|
pandas.Timestamp
|
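The pytest fixtures above all follow one pattern: build a frame from records whose first field is a Timestamp, then promote that field to the index so that year and month labels can be used for slicing. A small illustrative sketch of that pattern (the numbers are made up):
import pandas as pd
from pandas import Timestamp
# Column 0 holds the dates and becomes the index, exactly as in the fixtures.
frame = pd.DataFrame.from_records(
    [
        (Timestamp("2012-01-01"), 100.0, 100.0),
        (Timestamp("2012-02-01"), 101.2, 96.6),
    ]
).set_index(0, drop=True)
# Rows can then be selected by label, as the derived fixtures do,
# e.g. frame.loc["2012", :] or frame.loc["2012-01":"2012-02", :].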
import requests
import pandas as pd
import json
def load_data():
stations = [{'name': '<NAME>' , 'id': 97280}, {'name': '<NAME>' , 'id': 97100}]
df = pd.DataFrame()
for station in stations:
station_url = "https://opendata-download-metobs.smhi.se/api/version/latest/parameter/14/station/{0}/period/latest-months/data.json".format(station['id'])
r = requests.get(url = station_url)
data = r.json()
station_df = pd.DataFrame(data['value'])
station_df['date'] = pd.to_datetime(station_df['date'], unit='ms')
station_df = station_df.set_index('date')
station_df.index =
|
pd.to_datetime(station_df.index)
|
pandas.to_datetime
|
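The SMHI payload above carries epoch timestamps in milliseconds, which is why unit='ms' appears in the loader, and the completed line then guarantees a DatetimeIndex with pd.to_datetime. A tiny self-contained illustration of the same conversion (the sample values are made up):
import pandas as pd
# Epoch milliseconds -> timestamps, as in the station loader above.
raw = pd.DataFrame({"date": [1336953600000, 1337040000000], "value": [2.1, 1.4]})
raw["date"] = pd.to_datetime(raw["date"], unit="ms")
raw = raw.set_index("date")
# Re-applying pd.to_datetime to an index that is already datetime-like is a
# harmless no-op that ensures a DatetimeIndex, which is what the completion relies on.
raw.index = pd.to_datetime(raw.index)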
# coding: utf-8
# In[1]:
get_ipython().system('pip install gensim')
# In[2]:
import pandas as pd
import numpy as np
# In[3]:
from gensim.models import KeyedVectors
WORD2VEC_PATH = 'Word2Vec/glove.w2vformat.6B.100d.txt'
wordembedding = KeyedVectors.load_word2vec_format(WORD2VEC_PATH)
# In[4]:
#CELL FOR TESTING DATAFRAME STRUCTURE
list = ["this", "that", "man", "woman", "apple", "pear", "C"]
new_list = []
#Clean words that are not in vocabulary
for word in list:
if word in wordembedding.vocab:
new_list.append(word)
print(list)
print(new_list)
list = new_list
length = len(list)
similarities_array = np.zeros(shape=(length, length))
i = 0
for word in list:
similarities = []
for word2 in list:
similarities.append(wordembedding.similarity(word, word2))
similarities_array[i] = similarities
i += 1
similarities_df = pd.DataFrame(similarities_array, columns=list, index=list)
print(similarities_df["this"]["woman"])
print(similarities_df)
# In[5]:
#CELL FOR CHECKING VOCABULARY
emotions = ["happy", "sad", "angry", "relaxed"]
existing_set = set()
not_existing_set = set()
for emotion in emotions:
emotion_df = pd.read_csv("dataframes/" + emotion + "_unigram_data.csv") # read csv
list = emotion_df['word'].tolist() # get words
for word in list:
if word in wordembedding.vocab:
existing_set.add(word)
else:
not_existing_set.add(word)
print(len(existing_set))
print(existing_set)
print(len(not_existing_set))
print(not_existing_set)
# In[17]:
emotions = ["happy", "sad", "angry", "relaxed"]
for emotion in emotions:
emotion_df =
|
pd.read_csv("dataframes/" + emotion + "_unigram_data.csv")
|
pandas.read_csv
|
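The notebook above fills its similarity matrix with two nested Python loops. The same matrix can usually be built in one shot from the stacked word vectors. A sketch, assuming a gensim KeyedVectors object named wordembedding as in the snippet (the vocabulary attribute is key_to_index in gensim 4 and vocab in older releases, so both are tried):
import numpy as np
import pandas as pd
def similarity_frame(wordembedding, words):
    """Pairwise cosine similarities as a labelled DataFrame (illustrative sketch)."""
    # Keep only in-vocabulary words, mirroring the cleaning loop in the notebook.
    known = getattr(wordembedding, "key_to_index", None) or wordembedding.vocab
    words = [w for w in words if w in known]
    vectors = np.array([wordembedding[w] for w in words])
    # Normalise the rows; a single matrix product then yields all cosine similarities.
    unit = vectors / np.linalg.norm(vectors, axis=1, keepdims=True)
    return pd.DataFrame(unit @ unit.T, index=words, columns=words)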
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import logging
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.externals import joblib
import azureml.automl.core
from azureml.automl.core.shared import logging_utilities, log_server
from azureml.telemetry import INSTRUMENTATION_KEY
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
input_sample = pd.DataFrame({"age": pd.Series(["24"], dtype="int64"), "job": pd.Series(["technician"], dtype="object"), "marital": pd.Series(["single"], dtype="object"), "education": pd.Series(["university.degree"], dtype="object"), "default": pd.Series(["no"], dtype="object"), "housing": pd.Series(["no"], dtype="object"), "loan": pd.Series(["yes"], dtype="object"), "contact": pd.Series(["cellular"], dtype="object"), "month": pd.Series(["jul"], dtype="object"), "duration": pd.Series(["109"], dtype="int64"), "campaign": pd.Series(["3"], dtype="int64"), "pdays": pd.Series(["999"], dtype="int64"), "previous": pd.Series(["0"], dtype="int64"), "poutcome": pd.Series(["nonexistent"], dtype="object"), "emp.var.rate": pd.Series(["1.4"], dtype="float64"), "cons.price.idx": pd.Series(["93.918"], dtype="float64"), "cons.conf.idx":
|
pd.Series(["-42.7"], dtype="float64")
|
pandas.Series
|
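The input_sample above pins an explicit dtype per column because the inference schema decorators generate the service signature from it; numeric-looking strings such as "24" are coerced by the dtype argument. A reduced sketch of the same pattern with only a few of the columns, just to show the coercion:
import pandas as pd
# Each column carries an explicit dtype so the generated schema matches the
# training data; string literals like "24" are cast by the dtype, as above.
input_sample = pd.DataFrame(
    {
        "age": pd.Series(["24"], dtype="int64"),
        "job": pd.Series(["technician"], dtype="object"),
        "emp.var.rate": pd.Series(["1.4"], dtype="float64"),
    }
)
print(input_sample.dtypes)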
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
import mars.tensor as mt
from mars.tests.core import TestBase, ExecutorForTest
class Test(TestBase):
def setUp(self):
super().setUp()
self.executor = ExecutorForTest()
def testSetIndex(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df3, concat=True)[0])
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df4, concat=True)[0])
def testILocGetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
pd.testing.assert_series_equal(
expected, self.executor.execute_dataframe(df3, concat=True, check_series_name=False)[0])
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, self.executor.execute_dataframe(df4, concat=True)[0])
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df5, concat=True)[0])
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df6, concat=True)[0])
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df7, concat=True)[0])
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df8, concat=True)[0])
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df9, concat=True)[0])
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df10, concat=True)[0])
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
self.assertEqual(
expected, self.executor.execute_dataframe(df11, concat=True)[0])
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df12, concat=True)[0])
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df14, concat=True)[0])
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df13, concat=True)[0])
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
self.assertEqual(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[4])
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[selection])
def testILocSetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
def testLocGetItem(self):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non unique and non monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df = df2.loc[3, 'b']
result = self.executor.execute_tensor(df, concat=True)[0]
expected = raw2.loc[3, 'b']
self.assertEqual(result, expected)
df = df1.loc['a3', 'b']
result = self.executor.execute_tensor(df, concat=True, check_shape=False)[0]
expected = raw1.loc['a3', 'b']
self.assertEqual(result, expected)
df = df2.loc[1:4, 'b':'d']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw3.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# 'b' is non-unique, and non-monotonic
df = df4.loc[:, 'b']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw4.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# label on axis 0
df = df1.loc['a2', :]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw1.loc['a2', :]
pd.testing.assert_series_equal(result, expected)
# label-based fancy index
df = df2.loc[[3, 0, 1], ['c', 'a', 'd']]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[[3, 0, 1], ['c', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index, asc sorted
df = df2.loc[[0, 1, 3], ['a', 'c', 'd']]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[[0, 1, 3], ['a', 'c', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index in which non-unique exists
selection = rs.randint(2, size=(5,), dtype=bool)
df = df3.loc[selection, ['b', 'a', 'd']]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
df = df3.loc[md.Series(selection), ['b', 'a', 'd']]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw3.loc[selection, ['b', 'a', 'd']]
|
pd.testing.assert_frame_equal(result, expected)
|
pandas.testing.assert_frame_equal
|
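pd.testing.assert_frame_equal, the API labelled above, passes silently on a match and raises an AssertionError describing the first difference it finds, which is why the mars tests lean on it so heavily. A minimal illustration:
import pandas as pd
left = pd.DataFrame({"x": [1, 2], "y": [3.0, 4.0]})
right = left.copy()
# Passes silently when values, dtypes, index and columns all match.
pd.testing.assert_frame_equal(left, right)
# Tolerant comparison: ignore dtype differences such as int64 vs int32.
pd.testing.assert_frame_equal(left, right.astype({"x": "int32"}), check_dtype=False)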
# -*- coding: utf-8 -*-
'''
:author <NAME>
:licence MIT
'''
#load packages
import numpy as np
import pandas as pd
import os
from datetime import datetime
from collections import defaultdict
#for plotting
import plotly
import plotly.graph_objs as go
#pickle for saving - obsolete
#import pickle as pkl
class Zonar():
def read_raw(self, fname):
"""
Reads the raw acoustic data from a Zonar dive file.
Parameters
----------
fname : string
Path to Zonar raw file
Returns
-------
bfile - numpy array of the raw data
"""
try:
print('\nReading file %s...'%os.path.split(fname)[1])
bfile = np.fromfile(fname, dtype='uint8')
except FileNotFoundError:
print("No such file")
raise
return bfile
def read_header(self, bfile, k=1):
"""
reads in the header data of the raw file.
Parameters
----------
bfile : array
Numpy array from raw zonar data as created by Zonar.read_raw()
k : int, optional
Starting position of the header in the file. The default is 1.
Returns
-------
int
PID - Packet ID for the FOLLOWING (!) packet
k : int
Ending position of the header in the file.
head : pandas DataFrame
DataFrame containing the header information.
Header contains:
Name Description Format
---- ----------- -------
ver software version uint8 (1 byte)
size header size uint8 (1 byte)
pid Packet ID for the following packet uint8 (1 byte)
nbytes Size of the following packet i4 (4 bytes)
chksum Checksum for the following packet i4 (4 bytes)
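Examples
--------
A minimal sketch with a synthetic 12-byte buffer (a stand-in sync byte
at position 0 followed by an 11-byte header; the values are hypothetical,
not real Zonar output). The 'pid' field sits at byte offset 2 of the
header, i.e. index 3 of the buffer:
>>> import numpy as np
>>> buf = np.zeros(12, dtype='uint8')
>>> buf[3] = 0xc3  # pretend a raw-burst packet follows
>>> pid, k, head = Zonar().read_header(buf, k=1)
>>> int(pid.iloc[0]), k
(195, 12)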
"""
# define the header datatypes to get the information and the
# correct size
dthead = np.dtype([('ver','uint8'),
('size', 'uint8'),
('pid', 'uint8'),
('nbytes','i4'),
('chksum','i4')])
# read the header information into a pandas DataFrame from the buffered
# np array
head = pd.DataFrame(np.frombuffer(bfile[k:k+dthead.itemsize],dthead))
# define the position in the file after the header
k= k + dthead.itemsize
return head['pid'], k, head
def read_start(self, bfile, k):
"""
Reads the Dive Start packet (56 bytes of info).
Contains the Configuration of the Zonar for this dive.
Parameters
----------
bfile : array
Numpy array from raw zonar data as created by Zonar.read_raw()
k : int, optional
Starting position of Dive info in the file.
This position is returned by the previously read packet.
Returns
-------
k : int
Ending position of the Dive configuration information in the file.
start : pandas DataFrame
Dive Starting information, includes information shared by both frequencies:
Name Format Description
---- ------ -----------
ver uint8 packet version
diveNo i2 Dive number (from glider)
sprayTm i4 Glider Time [s] (from glider)
zooTm i4 Zonar Time [s]
nPings uint8 Number of pings in each Burst
dtBurst uint8 Number of seconds between bursts, if in continuous mode
warmup i2 time to warmup the electronics before transmit [ms]
tbin i2 time to average data into each bin [ms]
barker uint8 TRUE if use barker-code, else straight freq
nBit uint8 Number of bits in barker code
freqstart : pandas DataFrame
Frequency specific Dive Starting information:
Name Format Description
---- ------ -----------
freq i2 nominal frequency [kHz]
pulse i2 transmit pulse duration [ms]
blank i2 time between end of transmit and the first scan [ms]
dt i2 period between scans [µs], e.g. dt=200 µs -> 5 kHz sample rate
tScan i2 duration to take scans [ms]
tPing i2 time interval between pings [ms]
nScan i2 number of a/d scans taken, i.e. nScan = floor(1000 * tScan / dt)
tWait i2 time before next scan [ms], i.e. tWait = tPing - nScan * dt - blank - pulse
nBin i2 number of scans to average per bin, i.e. nBin = round(1000 * tBin / dt)
gn i2 number of a/d counts per 1 dB re V
"""
csync = int('aa', 16) # 0xAA = expected sync character
sync = bfile[k] #read the sync position
k += 1
if sync == csync:
#start
dtstart = np.dtype([('ver', 'uint8'),
('diveNo','i2'),
('sprayTm', 'i4'),
('zooTm','i4'),
('nPings', 'uint8'),
('dtBurst', 'uint8'),
('warmup', 'i2'),
('tbin', 'i2'),
('barker', 'uint8'),
('nBit', 'uint8')])
start = pd.DataFrame(np.frombuffer(bfile[k:k+dtstart.itemsize],dtstart))
k = k + dtstart.itemsize
dtfreq = np.dtype([('freq', 'i2'),
('pulse', 'i2'),
('blank', 'i2'),
('dt', 'i2'),
('tScan', 'i2'),
('tPing', 'i2'),
('nScan', 'i2'),
('tWait', 'i2'),
('nBin', 'i2'),
('gn', 'i2')])
freqstart = pd.DataFrame()
for i in range(0,2):
freqstart = freqstart.append(pd.DataFrame(np.frombuffer(bfile[k:k+dtfreq.itemsize],dtfreq)), ignore_index=True)
k=k+dtfreq.itemsize
else:
k = -1
start = 0
freqstart = 0
return k, start, freqstart
def read_burst(self, bfile, k):
"""
reads one selected burst header and data
Parameters
----------
bfile : array
Numpy array from raw zonar data as created by Zonar.read_raw()
k : int, optional
Starting position of Dive info in the file.
This position is returned by the previously read packet.
Returns
-------
k : int
Ending position of the burst data in the file.
burst : pandas DataFrame
Burst header data (12 bytes of header data):
Name Format Description
---- ------ -----------
ver uint8 Allows for future versions of the data packet, set to 0
beam uint8 Frequency ID with 1 = 200 kHz, 2= 1000 kHz, refer to dive start package for exact frequency
nP uint8 Number of pings in the burst
nS i2 Number of scans per Ping
press i2 pressure from glider [LSB, 0.1 dBar] with 1 LSB = 0.1 dBar
zooTm i4 time stamp: can be synched with glider time using dive_start info [s] (unix epoch time)
rs : pandas DataFrame
Burst data:
2 bytes for each nP * nS. Sequential per ping (nS for ping number 1, nS for ping number 2, ....)
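Examples
--------
A sketch (hypothetical nP=2, nS=3 burst with arbitrary counts) of how
the flat scan vector is labelled per ping, mirroring the code below:
>>> import numpy as np, pandas as pd
>>> rs = pd.DataFrame(np.arange(6, dtype='i2'))
>>> rs['Ping'] = np.repeat(range(2), 3)
>>> rs['Ping'].tolist()
[0, 0, 0, 1, 1, 1]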
"""
csync = int('aa', 16) # 0xAA = expected sync character
sync = bfile[k] #sync char found
k = k + 1
if csync == sync:
#read burst header data
dtburst = np.dtype([('ver', 'uint8'),
('beam', 'uint8'),
('nP', 'uint8'),
('nS', 'i2'),
('press', 'i2'),
('zooTm', 'i4'),])
#get burst data
burst = pd.DataFrame(np.frombuffer(bfile[k : k + dtburst.itemsize], dtburst))
k = k + dtburst.itemsize
nB = burst['nP'] * burst['nS'] #total scans over all pings
rs = pd.DataFrame(np.frombuffer(bfile[k : int(k + nB[0] * 2)], dtype='i2', count=nB[0]))
ping_lst = range(0,burst['nP'][0])
rs['Ping'] = np.repeat(ping_lst, burst['nS'][0])
k = k + nB[0] * 2
else:
k = -1
burst = 0
rs = 0
return k, burst, rs
def read_avg(self, bfile,k):
"""
reads the average packet data
Parameters
----------
bfile : array
Numpy array from raw zonar data as created by Zonar.read_raw()
k : int, optional
Starting position of the average packet data in the file.
This position is returned by the previously read packet.
Returns
-------
k : int
Ending position of the average packet data in the file.
avg : pandas DataFrame
Average packet header data:
Name Format Description
---- ------ -----------
ver uint8 Allows for future versions of the data packet, set to 0
beam uint8 Frequency ID with 1 = 200 kHz, 2= 1000 kHz, refer to dive start package for exact frequency
nBin uint8 Number of averaged bins = floor(total number of Scans / scans per bin)
press i2 pressure from glider [LSB, 0.1 dBar] with 1 LSB = 0.1 dBar
zooTm i4 time stamp: can be synched with glider time using dive_start info [s] (unix epoch time)
rs : pandas DataFrame
Averaged return data:
2 bytes for each averaged bin (nBin values in total)
"""
csync = int('aa', 16) # 0xAA = expected sync character
sync = bfile[k]; k += 1 #sync char found
if sync == csync: # match!!! continue
dtavg = np.dtype([('ver', 'uint8'),
('beam', 'uint8'),
('nBin', 'uint8'),
('press','i2'),
('zooTm','i4')])
avg = pd.DataFrame(np.frombuffer(bfile[k : k + dtavg.itemsize], dtavg))
k = k + dtavg.itemsize
rs = np.frombuffer(bfile[k : int(k + avg['nBin'][0] * 2)], dtype='i2', count=avg['nBin'][0])
k = k + avg['nBin'][0] * 2 + 2
else:
k = -1
avg = 0
rs = 0
return k, avg, rs
def init_cal(self, **kwargs):
"""
initializes the original calibration values
!!!These need to be corrected for the latest calibration values!!!
Parameters
----------
**kwargs : np array of size 2
Any calibration values can be overwritten:
Gain float calibration gain
Noise float Noise
CalNoise float calibration noise
sl float Source level
tau float transmit pulse duration [s]
beam_deg float beam angle in degrees
alpha float attenuation coefficient
cspeed float sound speed in surrounding medium [m/s]
gn int Number of a/d counts per dB re V
Returns
-------
cal : defaultdict
Gain float calibration gain
Noise float Noise
CalNoise float calibration noise
sl float Source level
tau float transmit pulse duration [s]
beam_deg float beam angle in degrees
alpha float attenuation coefficient
cspeed float sound speed in surrounding medium [m/s]
gn int Number of a/d counts per dB re V
beam_rad float beam angle in radians
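Examples
--------
A sketch of overriding selected values via keyword arguments
(hypothetical numbers; per-frequency values are given as
[200 kHz, 1000 kHz] pairs):
>>> cal = Zonar().init_cal(Gain=[52, 55], cspeed=1480)
>>> cal['Gain'], cal['cspeed']
([52, 55], 1480)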
"""
cal = defaultdict(list)
cal["Gain"] = [54, 54] #system gain
cal["TS_Gain"] = [0,0] #calibraiton gain
cal["Noise"] = [27, 39] #noise
cal["CalNoise"] = [31, 37] #calibration noise
cal["sl"] = [103, 113] #source elvel
cal["tau"] = [0.006, 0.006] #pulse duration [s]
cal["beam_deg"] = [9.8, 4] #beam angle
cal["alpha"] = [0.054, 0.38] #attenuation coefficient
cal["cspeed"] = 1500 #sound speed in surrounding medium
cal['gn'] = [40,40]
cal.update(kwargs) #update with provided values
cal["beam_rad"] = list(np.array(cal["beam_deg"]) * np.pi / 180) #convert beam angle from degrees to radians
return cal
def update_cal(self, raws, start, cal, **kwargs):
"""
updates calibration data in the raw output
Parameters
----------
raws : pandas DataFrame
raw output see read_one_dive() for details
start : pandas DataFrame
read_start output()
cal : defaultdict
init_cal output
**kwargs : np array of size 2
Any calibration values can be overwritten:
Gain float calibration gain
Noise float Noise
CalNoise float calibration noise
sl float Source level
tau float transmit pulse duration [s]
beam_deg float beam angle in degrees
alpha float attenuation coefficient
cspeed float sound speed in surrounding medium [m/s]
gn int Number of a/d counts per dB re V
Returns
-------
cal : defaultdict
Calibration output, see init_cal() for details
raws : pandas DataFrame
raw data output see read_one_dive() for details
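Examples
--------
The per-row lookup used below maps each sample's beam id (1 or 2) to
the matching per-frequency calibration value (hypothetical data):
>>> import numpy as np, pandas as pd
>>> beam = pd.Series([1, 2, 2, 1])
>>> np.array([27, 39])[beam - 1].tolist()
[27, 39, 39, 27]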
"""
#update the cal values provided
cal.update(kwargs)
#update raw data
raws['Frequency'] = np.array(start['freq'])[raws['beam']-1]
raws['Gain'] = np.array(cal['Gain'])[raws['beam']-1]
raws['Noise'] = np.array(cal['Noise'])[raws['beam']-1]
raws['CalNoise'] = np.array(cal['CalNoise'])[raws['beam']-1]
raws['sl'] = np.array(cal['sl'])[raws['beam']-1]
raws['tau'] = np.array(cal['tau'])[raws['beam']-1]
raws['beam_deg'] = np.array(cal['beam_deg'])[raws['beam']-1]
raws['beam_rad'] = np.array(cal['beam_rad'])[raws['beam']-1]
raws['alpha'] = np.array(cal['alpha'])[raws['beam']-1]
raws['c'] = cal['cspeed']
raws['nomwl'] = raws['c'] / raws['Frequency']
raws['k'] = 2 * np.pi / raws['nomwl']
raws['a'] = 1.6 / ( raws['k'] * np.sin(raws['beam_rad']/2))#active area
raws['psiD'] = 10 * np.log10( 5.78 / ( ( raws['k'] * raws['a'] ) ** 2))#equivalent beam angle in steradians
#update depth with new cal data
dz,raws = self.add_depth(start, raws)
#update Sv with the new calibration data
raws = self.get_Sv(start, raws)
return cal, raws
def read_one_dive(self, bfile, **kwargs):
"""
reads one full dive
Parameters
----------
bfile : array
Numpy array from raw zonar data as created by Zonar.read_raw()
**kwargs : TYPE
kwargs can be calibration values:
Gain - calibration gain, defaults to 54, 54
Noise, noise defaults to 27, 39
CalNoise, calibration noise defaults to 31, 37
sl, source level, defaults to 103, 113
tau, pulse duration, defaults to 0.006, 0.006
beam_deg, beam angle defaults to 9.8, 4
alpha, attenuation coefficient, defaults to 0.054, 0.38
cspeed, sound speed in surrounding medium, defaults to 1500
gn, number of a/d counts per dB re V, defaults to 40, 40
Returns
-------
raws : pandas DataFrame
Raw data including header and calibration information:
Name Format Description
---- ------ -----------
Raw uint8 Raw count data
Ping int Ping number
dive i2 Dive number (from glider)
beam uint8 Frequency ID with 1 = 200 kHz, 2= 1000 kHz, refer to dive start package for exact frequency
nBurst uint8 Number of burst
press i2 pressure from glider [LSB, 0.1 dBar] with 1 LSB = 0.1 dBar
zooTm i4 time stamp: can be synched with glider time using dive_start info [s] (unix epoch time)
sprayTm i4 Glider Time [s] (from glider)
nP uint8 Number of pings in the burst
nS i2 number of a/d scans taken, i.e. nScan = floor(1000 * tScan / dt)
Frequency i2 nominal frequency [kHz]
Gain float calibration gain
Noise float Noise
CalNoise float calibration noise
sl float Source level
tau float transmit pulse duration [s]
beam_deg float beam angle in degrees
beam_rad float beam angle in radians
alpha float attenuation coefficient
c float sound speed in surrounding medium [m/s]
nomwl float nominal wave length, lambda = c / Frequency [m]
k float wave number, 2pi / lambda
a float equivalent active (piston) radius, a = 1.6 / (k * sin(beam_rad / 2))
psiD float equivalent beam area 10 * log10 (5.78 / ((ka)**2))
nScan i2 number of a/d scans taken, i.e. nScan = floor(1000 * tScan / dt)
dt i2 period between scans [µs], e.g. dt=200 µs -> 5 kHz sample rate
blank i2 time between end of transmit and the first scan [ms]
tScan i2 duration to take scans [ms]
tPing i2 time interval between pings [ms]
tWait i2 time before next scan [ms], i.e. tWait = tPing - nScan * dt - blank - pulse
dz float distance from transducer [m]; the center of the first scan is at z0 = (blank + tau * 1000 / 2) * c / 2 / 1000 and scan number nscan lies at dz = z0 + nscan * c / 2 / 1000 * dt * 0.001 (see the Examples section below)
z float depth [m]; with the glider pressure zb = press / 10 [dBar], z = zb + dz * cos(17 * pi / 180), assuming a glider tilt angle of 17 degrees
start_time date %Y-%m-%d %H:%M:%S of Glider time, sprayTm
dive_time date %Y-%m-%d %H:%M:%S of zooTm, Zonar Time
avgRaws : pandas DataFrame
Averaged Data:
0 int chksum
dive int Dive number
beam uint8 Frequency ID with 1 = 200 kHz, 2= 1000 kHz, refer to dive start package for exact frequency
nAvg int Number of averaged bin
nBin uint8 Number of averaged bins = floor(total number of Scans / scans per bin)
zooTm i4 time stamp: can be synched with glider time using dive_start info [s] (unix epoch time)
sprayTm i4 Glider Time [s] (from glider)
nP uint8 Number of pings in the burst
freqstarts : pandas DataFrame
Frequency specific information:
freq int Nominal Frequency [kHz]
pulse int Pulse duration [ms]
blank int blanking time [ms]
dt int period between scans [µs], e.g. dt=200 µs -> 5 kHz sample rate
tPing i2 time interval between pings [ms]
tScan i2 duration to take scans [ms]
tWait i2 time before next scan [ms], i.e. tWait = tPing - nScan * dt - blank - pulse
nBin uint8 Number of averaged bins = floor(total number of Scans / scans per bin)
gn int Number of a/d counts per dB re V
cal : defaultdict
Calibration information:
Gain float calibration gain
Noise float Noise
CalNoise float calibration noise
sl float Source level
tau float transmit pulse duration [s]
beam_deg float beam angle in degrees
alpha float attenuation coefficient
cspeed float sound speed in surrounding medium [m/s]
gn int Number of a/d counts per dB re V
beam_rad float beam angle in radians
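Examples
--------
A sketch of the range/depth mapping described for dz and z above
(hypothetical values: blank = 10 ms, tau = 0.006 s, c = 1500 m/s,
dt = 200 scan period converted with the 0.001 factor as in the formula,
press = 1520 LSB; only the first three scans are shown):
>>> import numpy as np
>>> blank, tau, c, dt, press = 10, 0.006, 1500, 200, 1520
>>> z0 = (blank + tau * 1000 / 2) * c / 2 / 1000
>>> dz = z0 + np.arange(3) * c / 2 / 1000 * dt * 0.001
>>> np.round(dz, 2).tolist()
[9.75, 9.9, 10.05]
>>> round(float(press / 10 + dz[0] * np.cos(17 * np.pi / 180)), 2)
161.32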
"""
print('Processing dive...')
#initialise cal and update with input if any is provided
cal = self.init_cal(**kwargs)
#start timer
t0 = datetime.now()
#-- init values ----------------------
id_miss = int('c0', 16) #header id for start-mission (future use).
id_start = int('c1', 16) #start-dive info (settings)
id_end = int('c2', 16) #end-dive info
id_burst = int('c3', 16) #burst raw data
id_avg = int('c4', 16) #burst avg data
id_EOF = int('ff', 16) #end-of-file
#initialise empty dataframes
#starts = pd.DataFrame()
freqstarts = pd.DataFrame()
bursts = pd.DataFrame()
raws = pd.DataFrame()
avgs =
|
pd.DataFrame()
|
pandas.DataFrame
|
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
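A sketch with hypothetical sids and dates (the fixtures below bind the
first two arguments via `functools.partial`):
>>> out = create_expected_df_for_factor_compute(
...     pd.Timestamp('2015-01-05'), [0, 1],
...     [(0, 100.0, pd.Timestamp('2015-01-05'))],
...     pd.Timestamp('2015-01-06'))
>>> [int(c) for c in out.columns], list(out.index.names)
([0, 1], ['at_date', 'knowledge_date'])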
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
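# A small illustration of the arithmetic encoded in the expected timelines below (my reading
# of these fixtures, not part of the original test suite): stored estimates are split-adjusted
# as of `split_adjusted_asof_date`, so a split effective before that date is divided back out
# for window dates preceding its effective date, while splits effective after it are multiplied
# in from their effective date onward. For sid 30 (raw estimate 131, 10x split on 01-13,
# 11x split on 01-15):
#   131 * 1 / 10   # 2015-01-09..01-12: undo the 01-13 split, which has not happened yet
#   131            # 2015-01-13..01-14: the 01-13 split is now reflected as stored
#   131 * 11       # 2015-01-15 onward: apply the post-asof 01-15 split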
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7, cls.window_test_start_date),
(20, 121 * 0.7, pd.Timestamp("2015-01-07")),
(30, 230 * 11, cls.window_test_start_date),
(40, 240, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5 * 6, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7 * 0.8, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-07")),
(30, 230 * 11 * 12, cls.window_test_start_date),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-21"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-22"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-09"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 1 / 4, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-12"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-13", "2015-01-14")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
test_start_date = pd.Timestamp("2015-01-06", tz="utc")
test_end_date = pd.Timestamp("2015-01-12", tz="utc")
split_adjusted_asof = pd.Timestamp("2015-01-08")
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
],
"estimate1": [1100.0, 1200.0],
"estimate2": [2100.0, 2200.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-08"),
pd.Timestamp("2015-01-11"),
],
"estimate1": [1110.0, 1210.0],
"estimate2": [2110.0, 2210.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
}
)
return pd.concat([sid_0_events, sid_1_events])
@classmethod
def make_splits_data(cls):
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (0.3, 3.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
sid_1_splits = pd.DataFrame(
{
SID_FIELD_NAME: 1,
"ratio": (0.4, 4.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
return pd.concat([sid_0_splits, sid_1_splits])
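    # Illustration only (my reading of these fixtures, not part of the original suite): the
    # estimates carry a stale timestamp of 2015-01-05 and the data is split-adjusted as of
    # 2015-01-08, so the 01-07 ratios (0.3 / 0.4) are never applied, while the 01-09 ratios
    # (3.0 for sid 0, 4.0 for sid 1) are applied to the window once effective -- hence
    # expectations such as 1100 * 3.0 and 1110.0 * 4 in the subclass timelines below.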
@classmethod
def make_expected_timelines_1q_out(cls):
return {}
@classmethod
def make_expected_timelines_2q_out(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithSplitAdjustedMultipleEstimateColumns, cls).init_class_fixtures()
cls.timelines_1q_out = cls.make_expected_timelines_1q_out()
cls.timelines_2q_out = cls.make_expected_timelines_2q_out()
def test_adjustments_with_multiple_adjusted_columns(self):
dataset = MultipleColumnsQuartersEstimates(1)
timelines = self.timelines_1q_out
window_len = 3
class SomeFactor(CustomFactor):
inputs = [dataset.estimate1, dataset.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate1, estimate2):
assert_almost_equal(estimate1, timelines[today]["estimate1"])
assert_almost_equal(estimate2, timelines[today]["estimate2"])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
def test_multiple_datasets_different_num_announcements(self):
dataset1 = MultipleColumnsQuartersEstimates(1)
dataset2 = MultipleColumnsQuartersEstimates(2)
timelines_1q_out = self.timelines_1q_out
timelines_2q_out = self.timelines_2q_out
window_len = 3
class SomeFactor1(CustomFactor):
inputs = [dataset1.estimate1]
window_length = window_len
def compute(self, today, assets, out, estimate1):
assert_almost_equal(estimate1, timelines_1q_out[today]["estimate1"])
class SomeFactor2(CustomFactor):
inputs = [dataset2.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate2):
assert_almost_equal(estimate2, timelines_2q_out[today]["estimate2"])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est1": SomeFactor1(), "est2": SomeFactor2()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
class PreviousWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate1", "estimate2"],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 3),
"estimate2": np.array([[np.NaN, np.NaN]] * 3),
},
pd.Timestamp("2015-01-07", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 3),
"estimate2": np.array([[np.NaN, np.NaN]] * 3),
},
pd.Timestamp("2015-01-08", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 1110.0]]),
"estimate2": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 2110.0]]),
},
pd.Timestamp("2015-01-09", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]]
+ [[np.NaN, 1110.0 * 4]]
+ [[1100 * 3.0, 1110.0 * 4]]
),
"estimate2": np.array(
[[np.NaN, np.NaN]]
+ [[np.NaN, 2110.0 * 4]]
+ [[2100 * 3.0, 2110.0 * 4]]
),
},
pd.Timestamp("2015-01-12", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]] * 2 + [[1200 * 3.0, 1210.0 * 4]]
),
"estimate2": np.array(
[[np.NaN, np.NaN]] * 2 + [[2200 * 3.0, 2210.0 * 4]]
),
},
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
|
pd.Timestamp("2015-01-06", tz="utc")
|
pandas.Timestamp
|
import datetime
import random
import re
from typing import List, Text
import pandas as pd
import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer
''' '''
class PreprocessFirewall(object):
def __init__(self, logs: List[Text]) -> None:
self.logs = logs
@staticmethod
def _cleanTimelineMessage(l: Text):
l = re.sub(r'^.*?%ASA-\w+-\d-\d+:', '', l)
# OR
l = re.sub(r'^.*?%ASA--\d-\d+:', '', l)
# OR
#l = re.sub(r'^.*?%ASA--\d-\d+:', '', l)
# %ASA-bridge-6-1100
# there are some messages
# started with %ASA--4-733:
# started with %ASA-session-\d-\d+::
# manually omitted
return l
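    # Hypothetical before/after example of the prefix stripping above (not from the real logs):
    #   in:  "Apr 16 11:45:20 fw01 %ASA-session-6-302013: Built outbound TCP connection ..."
    #   out: " Built outbound TCP connection ..."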
@staticmethod
def _cleanParanthesis(l: Text):
'''for cleaning extra IP information '''
l = re.sub(r'\(([^()]*)\)', '', l)
return l
@staticmethod
def _info_bracket_fix(l: Text):
''' clean bracket around information '''
xxx_match = [xxx.group() for xxx in re.finditer(r"(\[)[a-z ]+(\])",l)]
xxx_bound = [xxx.groups() for xxx in re.finditer(r"(\[)[a-z ]+(\])",l)]
xxx_out = l
if len(xxx_match)>0:
for xxxx in xxx_bound[0]:
xxx_out = xxx_out.replace(xxxx,"")
return xxx_out
@staticmethod
def _clean_HEX(l: Text):
xxx = re.sub(r"((?<=[^A-Za-z0-9])|^)(0x[a-f0-9A-F]+)((?=[^A-Za-z0-9])|$)","",l)
xxx = re.sub("\[, \]","",xxx)
return xxx
@staticmethod
def _augment_some_special_chars(l: Text):
xx = re.sub("\B_\B"," ",l)
xx = re.sub("->","to",xx)
return xx
@staticmethod
def _cleanlefovers(l: Text):
        '''for cleaning leftover brackets and commas (e.g. after hex removal) '''
l = re.sub(r'[\[\],]', '', l)
return l
@staticmethod
def _fix_missing_IP(l: Text):
        ''' if the last octet is a placeholder "x", assign it a random number '''
# ^ start of the line
# $ end of the line
REGEX_parts = r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z])"
R = re.compile(REGEX_parts, re.S)
set_x = random.randrange(1, 256, 1)
        l_edited = R.sub(lambda m: m.group().replace('.x', '.' + str(set_x), 1), l)  # keep the dot so the octet stays valid
return l_edited
@staticmethod
def _fix_range_IP(l: Text):
        ''' if the last octet is a range (e.g. 10-250), pick a random number within it '''
# ^ start of the line
# $ end of the line
REGEX_parts = r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.((\d|\d\d+|1\d\d+|2[0-4]\d)\-(1\d\d*|2[0-4]\d|250))"
R = re.compile(REGEX_parts, re.S)
l_edited = R.sub(lambda m: m.group().replace(
m[4], str(random.randrange(int(m[5]), int(m[6]), 1)), 1), l)
return l_edited
@staticmethod
def _fix_emptystrings(l: Text):
        ''' trims leading/trailing whitespace and collapses runs of whitespace '''
        xx = re.sub(r"^[ \t\r\n]+|[ \t\r\n]+$", "", l)
        xx = re.sub(r"\s{2,}", " ", xx)  # operate on the trimmed string, not the original
return xx
    @staticmethod
    def _getTime(l: Text):
        ''' extracts time from log line (re-enabled: save() below relies on it) '''
        found = re.findall(
            "((00|[0-9]|1[0-9]|2[0-3]):([0-9]*|[0-5][0-9]):([0-9]*|[0-5][0-9]))", l.strip())
        foundTime = datetime.time(
            int(found[0][1]), int(found[0][2]), int(found[0][3]))
        return foundTime
def clean(self, l: Text):
# line edited - le
le = self._cleanTimelineMessage(l)
le = self._cleanParanthesis(le)
le = le.lower()
le = self._clean_HEX(le)
le = self._info_bracket_fix(le)
le = self._augment_some_special_chars(le)
le = self._fix_missing_IP(le)
le = self._fix_range_IP(le)
le = self._fix_emptystrings(le)
return le
def save(self):
''' save log file as csv '''
try:
import pandas as pd
from tqdm import tqdm
except ImportError:
raise
print('Saving the file...')
df = {'time': [], 'log': []}
# For every sentence...
tn = self.__len__() # total number of logs
print(f'Total number of logs: {tn}')
stride = 1 # a step for tqdm
        # keep the output easy to open in a file viewer by limiting each CSV to 1M rows
LIMIT = 1000000
startTime = datetime.datetime.now().strftime("%H_%M_%S")
parts = int(tn/LIMIT)
residual = tn % LIMIT
parts = parts+1 if residual > 0 else parts
for part in range(0, parts, stride):
df = {'time': [], 'log': []}
start = part*LIMIT
end = start+LIMIT if tn-start > LIMIT else start+residual
print(f'Part-{part+1} working...')
for i in tqdm(range(start, end, stride)):
line = self.logs[i]
# if there is a match
foundTime = self._getTime(line)
# line edited - le
le = self.clean(line)
df['log'].append(le)
df['time'].append(foundTime)
df = pd.DataFrame(data=df, columns=['time', 'log'])
df.to_csv("data/firewall/anomaly-log-part-" + str(part+1) + "-" +
startTime+".csv", index=False, header=True)
del(df)
def __getitem__(self, idx):
#time = self._getTime(self.logs[idx])
text = self.clean(self.logs[idx])
#return (time, text)
return text
def __len__(self):
return len(self.logs)
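# Minimal usage sketch of the class above (hypothetical log line, shown as comments only):
#   cleaner = PreprocessFirewall(['Apr 16 11:45:20 fw01 %ASA-session-6-106023: Deny tcp src outside:192.168.2.x/4431 dst dmz:10.0.0.5/80'])
#   cleaner[0]  # -> lowercased text with the %ASA prefix stripped and the ".x" octet filled with a random value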
def main():
#bids_logs = PreprocessFirewall(out['log'].tolist())
# DAY1
p1 = pd.read_csv("data/firewall/anomaly/day1-labeled-part1.csv", sep=',')
p2 = pd.read_csv("data/firewall/anomaly/day1-labeled-part2.csv", sep=',')
p3 = pd.read_csv("data/firewall/anomaly/day1-labeled-part3.csv", sep=',')
p4 = pd.read_csv("data/firewall/anomaly/day1-labeled-part4.csv", sep=',')
p5 = pd.read_csv("data/firewall/anomaly/day1-labeled-part5.csv", sep=',')
p6 = pd.read_csv("data/firewall/anomaly/day1-labeled-part6.csv", sep=',')
p7 = pd.read_csv("data/firewall/anomaly/day1-labeled-part7.csv", sep=',')
p8 = pd.read_csv("data/firewall/anomaly/day1-labeled-part8.csv", sep=',')
p9 = pd.read_csv("data/firewall/anomaly/day1-labeled-part9.csv", sep=',')
p10 = pd.read_csv("data/firewall/anomaly/day1-labeled-part10.csv", sep=',')
p11 = pd.read_csv("data/firewall/anomaly/day1-labeled-part11.csv", sep=',')
p12 = pd.read_csv("data/firewall/anomaly/day1-labeled-part12.csv", sep=',')
whole_day1 = [p1, p2, p3, p4, p5, p6, p7, p8, p9,p10,p11,p12]
df_day1 = pd.concat(whole_day1)
#df_day1.info()
#df_day1['atype'] = np.where(df_day1.label == 1, 'Collective','-')
df_day1['type'] = np.where(df_day1.label == 1, 'DDOS','NORMAL')
# DAY 2
p1 = pd.read_csv("data/firewall/anomaly/day2-labeled-part1.csv", sep=',')
p2 = pd.read_csv("data/firewall/anomaly/day2-labeled-part2.csv", sep=',')
p3 = pd.read_csv("data/firewall/anomaly/day2-labeled-part3.csv", sep=',')
whole_day2 = [p1, p2, p3]
df_day2 = pd.concat(whole_day2)
#df_day2['atype'] = np.where(df_day2.log.str.contains(r"192.168.2.175/55892|192.168.2.175/55891|10.200.150.201"), 'Collective','-')#conditional
df_day2['type'] = np.where(df_day2.log.str.contains(r"192.168.2.175/55892|192.168.2.175/55891"), 'PS','NORMAL')
df_day2.loc[df_day2.log.str.contains(r"10.200.150.201"), 'type'] = 'RD'
#df_day2.loc[df_day2.log.str.contains(r"10.200.150.201"), 'atype'] = 'Point'
#df_day2.info()
# DAY 3
df_day3 = pd.read_csv("data/firewall/anomaly/day3-labeled.csv", sep=',')
df_day3['type'] = np.where(df_day3.log.str.contains("192.168.2.251"), 'RD','NORMAL')
#df_day3['atype'] = np.where(df_day3.log.str.contains("192.168.2.251"), 'Point','-')
# Concat data first
days = [df_day1, df_day2, df_day3]
whole_data = pd.concat(days)
# lets init tokenizer here
tokenizer = AutoTokenizer.from_pretrained('roberta-base')
# get groups
grouped = whole_data.groupby(["type"])
types = ['DDOS','PS','RD','NORMAL']
# new
adict={'log':[],'label':[],'type':[],'time':[]}
def add_row(arow,alabel,atype,atime):
adict['log'].append(arow)
adict['label'].append(alabel)
adict['type'].append(atype)
adict['time'].append(atime)
for t in types:
print(f'TYPE: {t} \n')
# anomaly group
if t != 'DDOS':
if t == 'PS':
ag = grouped.get_group(t).sample(frac=0.1, random_state=666)
else:
ag = grouped.get_group(t)
else:
ag = grouped.get_group(t).sample(frac=0.01, random_state=666)
        # preprocess and get all logs
ag_logs = PreprocessFirewall(ag['log'].tolist())
ag_time = ag['time'].tolist()
        # get label
ag_label = ag['label'].tolist()[0]
#
arow = ""
        # build concatenated log chunks of at most 512 tokens, each paired with its label
count=0
for i in tqdm(range(len(ag_logs))):
alog = ag_logs[i]
atime = ag_time[i]
if t != 'RD':
input_ids = tokenizer.encode(alog, add_special_tokens=False)
count += len(input_ids)
if count <= 512:
arow += alog +" "+tokenizer.sep_token+" "
if i == len(ag_logs)-1:
add_row(arow[:-1],ag_label,t,atime)
count = 0
else:
add_row(arow,ag_label,t,atime)
# re-init
count = 0
arow = alog +" "+tokenizer.sep_token+" "
input_ids = tokenizer.encode(alog, add_special_tokens=False)
count += len(input_ids)
else:
arow = alog +" "+tokenizer.sep_token
add_row(arow,ag_label,t,atime)
adf =
|
pd.DataFrame(adict)
|
pandas.DataFrame
|
from ..feature_types import primary_feature, log
from ..raw.gps import gps
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
import math
from sklearn.cluster import DBSCAN
import LAMP
@primary_feature(
name='cortex.significant_locations',
dependencies=[gps],
attach=False
)
def significant_locations(k_max=10, eps=1e-5, **kwargs):
"""
Get the coordinates of significant locations visited by the participant in the
specified timeframe using the KMeans clustering method.
    NOTE: Via DBSCAN, this algorithm first reduces the amount of GPS readings used to generate significant locations. If there is a large amount of new GPS data to reduce, this step can take a long time.
    NOTE: DBSCAN uses O(n*k) memory. If you run it on a large GPS dataframe (>100k points), a memory crash could occur.
NOTE: This algorithm does NOT return the centroid radius and thus cannot be used
to coalesce multiple SigLocs into one.
:param k_max (int): The maximum KMeans clusters to test (FIXME).
:return latitude (float): The latitude of the SigLoc centroid.
:return longitude (float): The longitude of the SigLoc centroid.
:return radius (float): The radius of the SigLoc centroid (in meters).
:return proportion (float): The proportion of GPS events located within this
        centroid compared to all GPS events over the entire time window.
:return duration (int): The duration of time spent by the participant in the centroid.
"""
# Calculates straight-line (not great-circle) distance between two GPS points on
    # Earth in kilometers; equivalent to roughly ~55% - 75% of the haversine (great-circle)
    # distance. 110.25 km approximates the length of one degree of latitude.
#
# https://jonisalonen.com/2014/computing-distance-between-coordinates-can-be-simple-and-fast/
def euclid(g0, g1):
def _euclid(lat, lng, lat0, lng0): # degrees -> km
return 110.25 * ((((lat - lat0) ** 2) + (((lng - lng0) * np.cos(lat0)) ** 2)) ** 0.5)
return _euclid(g0[0], g0[1], g1[0], g1[1])
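    # Rough sanity check of the approximation above (illustrative only): one degree of latitude
    # is ~110 km, so euclid((42.0, -71.0), (43.0, -71.0)) evaluates to 110.25.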
    # Get DBSCAN metadata first
try:
reduced_data = LAMP.Type.get_attachment(kwargs['id'], 'cortex.significant_locations.reduced')['data']#['data']
except:
reduced_data = {'end':0, 'data':[]}
reduced_data_end = reduced_data['end']
new_reduced_data = reduced_data['data'].copy()
if reduced_data_end < kwargs['end']: #update reduced data by getting new gps data and running dbscan
### DBSCAN ###
_gps = gps(**{**kwargs, 'start':reduced_data_end})['data']
df =
|
pd.DataFrame.from_dict(_gps)
|
pandas.DataFrame.from_dict
|
from os.path import abspath, dirname, join
from unittest import TestCase
import numpy as np
import pandas as pd
from nose_parameterized import parameterized
from pandas import Timedelta, read_csv
from pandas.util.testing import assert_index_equal
from pytz import UTC
from trading_calendars import all_trading_minutes, get_calendar
from trading_calendars.exchange_calendar_xshg import XSHGExchangeCalendar
def T(x):
return pd.Timestamp(x, tz=UTC)
class XSHGCalendarTestCase(TestCase):
# Override in subclasses.
answer_key_filename = 'xshg'
calendar_class = XSHGExchangeCalendar
# Affects tests that care about the empty periods between sessions. Should
# be set to False for 24/7 calendars.
GAPS_BETWEEN_SESSIONS = True
# Affects tests that care about early closes. Should be set to False for
# calendars that don't have any early closes.
HAVE_EARLY_CLOSES = True
# Affects tests that care about late opens. Since most do not, defaulting
# to False.
HAVE_LATE_OPENS = False
# Affects test_sanity_check_session_lengths. Should be set to the largest
# number of hours that ever appear in a single session.
MAX_SESSION_HOURS = 0
# Affects test_minute_index_to_session_labels.
# Change these if the start/end dates of your test suite don't contain the
# defaults.
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp('2011-01-04', tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp('2011-04-04', tz=UTC)
# Affects tests around daylight savings. If possible, should contain two
# dates that are not both in the same daylight savings regime.
DAYLIGHT_SAVINGS_DATES = ["2004-04-05", "2004-11-01"]
# Affects test_start_end. Change these if your calendar start/end
# dates between 2010-01-03 and 2010-01-10 don't match the defaults.
TEST_START_END_FIRST = pd.Timestamp('2010-01-03', tz=UTC)
TEST_START_END_LAST = pd.Timestamp('2010-01-10', tz=UTC)
TEST_START_END_EXPECTED_FIRST = pd.Timestamp('2010-01-04', tz=UTC)
TEST_START_END_EXPECTED_LAST = pd.Timestamp('2010-01-08', tz=UTC)
MAX_SESSION_HOURS = 5.5
HALF_SESSION_HOURS = 2.0
HAVE_EARLY_CLOSES = False
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp('2011-04-07', tz=UTC)
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
'./resources',
filename + '.csv',
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
            # dtype kwarg causes read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz=UTC)
)
def setUp(self):
self.answers = self.load_answer_key(self.answer_key_filename)
self.start_date = self.answers.index[0]
self.end_date = self.answers.index[-1]
self.calendar = self.calendar_class(self.start_date, self.end_date)
self.one_minute = pd.Timedelta(minutes=1)
self.one_hour = pd.Timedelta(hours=1)
def tearDown(self):
self.calendar = None
self.answers = None
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.MAX_SESSION_HOURS)
def test_sanity_check_am_session_lengths(self):
# make sure that no session is longer than self.HALF_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.am_open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.HALF_SESSION_HOURS)
def test_sanity_check_pm_session_lengths(self):
# make sure that no session is longer than self.HALF_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.pm_open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.HALF_SESSION_HOURS)
def test_calculated_against_csv(self):
assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
for market_minute in self.answers.market_open:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(self.calendar.is_open_on_minute(market_minute_utc))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(self.calendar.is_open_on_minute(pre_market))
for market_minute in self.answers.market_close:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(self.calendar.is_open_on_minute(close_minute_utc))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(self.calendar.is_open_on_minute(post_market))
def _verify_minute(self, calendar, minute,
next_open_answer, prev_open_answer,
next_close_answer, prev_close_answer):
self.assertEqual(
calendar.next_open(minute),
next_open_answer
)
self.assertEqual(
self.calendar.previous_open(minute),
prev_open_answer
)
self.assertEqual(
self.calendar.next_close(minute),
next_close_answer
)
self.assertEqual(
self.calendar.previous_close(minute),
prev_close_answer
)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
opens = self.answers.market_open.iloc[1:-2]
closes = self.answers.market_close.iloc[1:-2]
previous_opens = self.answers.market_open.iloc[:-1]
previous_closes = self.answers.market_close.iloc[:-1]
next_opens = self.answers.market_open.iloc[2:]
next_closes = self.answers.market_close.iloc[2:]
for (open_minute, close_minute,
previous_open, previous_close,
next_open, next_close) in zip(opens, closes,
previous_opens, previous_closes,
next_opens, next_closes):
minute_before_open = open_minute - self.one_minute
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar, minute_before_open, open_minute,
previous_open, close_minute, previous_close
)
# open minute
self._verify_minute(
self.calendar, open_minute, next_open, previous_open,
close_minute, previous_close
)
# second minute of session
self._verify_minute(
self.calendar, open_minute + self.one_minute, next_open,
open_minute, close_minute, previous_close
)
# minute before the close
self._verify_minute(
self.calendar, close_minute - self.one_minute, next_open,
open_minute, close_minute, previous_close
)
# the close
self._verify_minute(
self.calendar, close_minute, next_open, open_minute,
next_close, previous_close
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar, close_minute + self.one_minute, next_open,
open_minute, next_close, close_minute
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2],
self.calendar.next_minute(minute)
)
self.assertEqual(
all_minutes[idx],
self.calendar.previous_minute(minute)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open)
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close)
)
def test_minute_to_session_label(self):
for idx, info in enumerate(self.answers[1:-2].iterrows()):
session_label = info[1].name
open_minute = info[1].iloc[0]
close_minute = info[1].iloc[1]
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.iloc[idx + 2].name
previous_session_label = self.answers.iloc[idx].name
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
self.calendar.minute_to_session_label(open_minute),
self.calendar.minute_to_session_label(open_minute,
direction="next"),
self.calendar.minute_to_session_label(open_minute,
direction="previous"),
self.calendar.minute_to_session_label(open_minute,
direction="none"),
self.calendar.minute_to_session_label(hour_into_session),
self.calendar.minute_to_session_label(hour_into_session,
direction="next"),
self.calendar.minute_to_session_label(hour_into_session,
direction="previous"),
self.calendar.minute_to_session_label(hour_into_session,
direction="none"),
self.calendar.minute_to_session_label(close_minute),
self.calendar.minute_to_session_label(close_minute,
direction="next"),
self.calendar.minute_to_session_label(close_minute,
direction="previous"),
self.calendar.minute_to_session_label(close_minute,
direction="none"),
session_label
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
self.calendar.minute_to_session_label(
minute_before_session
)
)
minutes_that_resolve_to_this_session.append(
self.calendar.minute_to_session_label(
minute_before_session,
direction="next"
)
)
minutes_that_resolve_to_this_session.append(
self.calendar.minute_to_session_label(
minute_after_session,
direction="previous"
)
)
self.assertTrue(all(x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session))
minutes_that_resolve_to_next_session = [
self.calendar.minute_to_session_label(minute_after_session),
self.calendar.minute_to_session_label(minute_after_session,
direction="next"),
next_session_label
]
self.assertTrue(all(x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session))
self.assertEqual(
self.calendar.minute_to_session_label(minute_before_session,
direction="previous"),
previous_session_label
)
if self.GAPS_BETWEEN_SESSIONS:
# Make sure we use the cache correctly
minutes_that_resolve_to_different_sessions = [
self.calendar.minute_to_session_label(minute_after_session,
direction="next"),
self.calendar.minute_to_session_label(
minute_after_session,
direction="previous"
),
self.calendar.minute_to_session_label(minute_after_session,
direction="next"),
]
self.assertEqual(
minutes_that_resolve_to_different_sessions,
[next_session_label,
session_label,
next_session_label]
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
self.calendar.minute_to_session_label(open_minute, "asdf")
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
self.calendar.minute_to_session_label(
minute_before_session,
direction="none"
)
@parameterized.expand([
(1, 0),
(2, 0),
(2, 1),
])
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
self.MINUTE_INDEX_TO_SESSION_LABELS_START,
self.MINUTE_INDEX_TO_SESSION_LABELS_END,
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
pd.DatetimeIndex(
minutes.map(self.calendar.minute_to_session_label)
),
self.calendar.minute_index_to_session_labels(minutes),
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label),
session_labels[idx + 1]
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label),
session_labels[idx - 1]
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
am_open, am_close = self.calendar.am_open_and_close_for_session(
full_session_label
)
pm_open, pm_close = self.calendar.pm_open_and_close_for_session(
full_session_label
)
np.testing.assert_array_equal(
minutes,
all_trading_minutes(am_open, pm_close)
)
# early close period
if self.HAVE_EARLY_CLOSES:
early_close_session_label = self.calendar.early_closes[0]
minutes_for_early_close = \
self.calendar.minutes_for_session(early_close_session_label)
_open, _close = self.calendar.open_and_close_for_session(
early_close_session_label
)
np.testing.assert_array_equal(
minutes_for_early_close,
pd.date_range(start=_open, end=_close, freq="min")
)
# late open period
if self.HAVE_LATE_OPENS:
late_open_session_label = self.calendar.late_opens[0]
minutes_for_late_open = \
self.calendar.minutes_for_session(late_open_session_label)
_open, _close = self.calendar.open_and_close_for_session(
late_open_session_label
)
np.testing.assert_array_equal(
minutes_for_late_open,
|
pd.date_range(start=_open, end=_close, freq="min")
|
pandas.date_range
|
from random import uniform
import numpy as np
from global_land_mask import globe
from pandas import DataFrame
import pandas as pd
# If you are using Sentinel-1 and Sentinel-2 data please use the default values
def new_point(latitude_from=-56, latitude_to=84, longitude_from=-180, longitude_to=180):
'''
    Randomly generates a geographic point (latitude, longitude).
'''
latitude = uniform(latitude_from, latitude_to)
longitude = uniform(longitude_from, longitude_to)
return latitude, longitude
def get_land_coordinates(number = 500, gui=False):
'''
    Generates a list of points distributed over the Earth's land surface.
'''
i = 0
points = np.zeros((3,number))
while i<number:
lat, lon = new_point()
# If the generated point is on land, it will be added to the list
# otherwise a new point will be generated
if globe.is_land(lat, lon):
points[0, i] = lat
points[1, i] = lon
i = i + 1
print(' # Points generated')
return points
def save_points(points, path = './points.csv'):
'''
    Saves the generated points to a CSV file.
'''
lat = points[0,:]
lon = points[1,:]
state = points[2,:]
points_to_save = {'Latitude':lat, 'Longitude':lon, 'State':state}
globe_points = DataFrame(data = points_to_save)
globe_points.to_csv(path)
print(' # Points saved')
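# Typical flow using the helpers in this module (illustrative sketch, not part of the original script):
#   points = get_land_coordinates(number=100)   # sample 100 points that fall on land
#   save_points(points, path='./points.csv')    # persist them
#   df = load_points('./points.csv')            # reload them as a DataFrame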
def load_points(path = './points.csv'):
'''
    Loads the previously generated points from a CSV file.
'''
data_frame =
|
pd.read_csv(path, index_col=0)
|
pandas.read_csv
|
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# aggregate.py
#
# Gremlin traversal steps that perform aggregation
from abc import ABC
from typing import *
import pandas as pd
from text_extensions_for_pandas.gremlin.traversal.base import GraphTraversal,\
UnaryTraversal
from text_extensions_for_pandas.gremlin.traversal.format import ValuesTraversal
from text_extensions_for_pandas.gremlin.traversal.underscore import __
class AggregateTraversal(UnaryTraversal, ABC):
"""
Base class for all traversals that implement aggregation and return a single
scalar value.
"""
def pandas_agg_name(self) -> str:
"""
:return: The name of the equivalent Pandas aggregation type
"""
raise NotImplementedError("Subclasses must implement this method")
class SumTraversal(AggregateTraversal):
"""
Gremlin `sum()` step.
If applied to a span-valued input, combines the spans into a minimal span
that covers all inputs.
"""
def __init__(self, parent):
UnaryTraversal.__init__(self, parent)
def compute_impl(self) -> None:
tail_type = self.parent.step_types[-1]
if tail_type != "p":
raise NotImplementedError(f"Sum of step type '{tail_type}' not "
f"implemented")
input_series = self.parent.last_step()
self._set_attrs(
paths=pd.DataFrame({0: [input_series.sum()]}),
step_types=["p"],
aliases={}
)
def pandas_agg_name(self) -> str:
return "sum"
####################################################
# syntactic sugar to allow aggregates without __
def sum_():
return __.sum()
# end syntactic sugar
####################################################
class GroupTraversal(UnaryTraversal):
"""
Gremlin `group` step, usually modulated by one or more 'by` modulators.
"""
def __init__(self, parent):
UnaryTraversal.__init__(self, parent)
self._key = None # Type: str
self._value = None # Type: AggregateTraversal
def by(self, key_or_value: Union[str, AggregateTraversal]) -> \
"GroupTraversal":
"""
`by` modulator to `group()`, for adding a key or value to the group
step.
Modifies this object in place.
:param key_or_value: If a string, name of the field to group by; if
an `AggregateTraversal`, subquery to run for each group.
:returns: A pointer to this object (after modification) to enable
operation chaining.
"""
if isinstance(key_or_value, str):
if self._key is not None:
raise ValueError("Tried to set key of group() step twice")
self._key = key_or_value
elif isinstance(key_or_value, AggregateTraversal):
if self._value is not None:
raise ValueError("Tried to set value of group() step twice")
self._value = key_or_value
else:
raise ValueError(f"Object '{str(key_or_value)}' passed to "
f"group().by() is of type '{type(key_or_value)}',"
f"which is not a supported type.")
return self
def compute_impl(self) -> None:
tail_type = self.parent.step_types[-1]
if tail_type != "v":
raise NotImplementedError(f"Grouping on a path that ends in step "
f"type '{tail_type}' not implemented")
if (self._key is not None
and self._value is not None
# TODO: Parent must be values
and isinstance(self._value.parent, ValuesTraversal)
and self._value.parent.parent == __):
            # Fast path for the common case group().by(key).by(__.values(field).agg())
# Runs a direct Pandas aggregate over the selected vertices from the
# last step, then translates the results into the format that
# a Gremlin group step is supposed to use (key->value pairs)
values_field = self._value.parent.field_name
flat_aggs = (
self.parent.last_vertices()
.groupby([self._key])
.aggregate({self._key: "first",
values_field: self._value.pandas_agg_name()})
)
# flat_aggs is a 2-column dataframe of key and value. Convert to
# a record.
result_record = {
r[0]: r[1] for r in flat_aggs.itertuples(index=False)
}
self._set_attrs(
paths=
|
pd.DataFrame({0: [result_record]})
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 10:04:24 2019
@author: nsde
"""
#%%
import pandas as pd
import numpy as np
from ..base import Dataset
#%%
class wine_quality(Dataset):
def _create_dataframe(self):
for f in self.files:
if 'winequality-red.csv' in f:
df_red =
|
pd.read_csv(f, sep=';')
|
pandas.read_csv
|
import pytest
import pandas as pd
import numpy as np
import pyarrow as pa
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.air.util.data_batch_conversion import convert_batch_type_to_pandas
from ray.air.util.data_batch_conversion import convert_pandas_to_batch_type
from ray.air.util.data_batch_conversion import DataType
from ray.air.util.tensor_extensions.pandas import TensorArray
from ray.air.util.tensor_extensions.arrow import ArrowTensorArray
def test_pandas_pandas():
input_data = pd.DataFrame({"x": [1, 2, 3]})
expected_output = input_data
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert convert_pandas_to_batch_type(actual_output, type=DataType.PANDAS).equals(
input_data
)
def test_numpy_pandas():
input_data = np.array([1, 2, 3])
expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: TensorArray([1, 2, 3])})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert np.array_equal(
convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY), input_data
)
def test_numpy_multi_dim_pandas():
input_data = np.arange(12).reshape((3, 2, 2))
expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: TensorArray(input_data)})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert np.array_equal(
convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY), input_data
)
def test_numpy_object_pandas():
input_data = np.array([[1, 2, 3], [1]], dtype=object)
expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: TensorArray(input_data)})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert np.array_equal(
convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY), input_data
)
def test_dict_fail():
input_data = {"x": "y"}
with pytest.raises(ValueError):
convert_batch_type_to_pandas(input_data)
def test_dict_pandas():
input_data = {"x": np.array([1, 2, 3])}
expected_output = pd.DataFrame({"x": TensorArray(input_data["x"])})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
output_array = convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY)
assert np.array_equal(output_array, input_data["x"])
def test_dict_multi_dim_to_pandas():
tensor = np.arange(12).reshape((3, 2, 2))
input_data = {"x": tensor}
expected_output = pd.DataFrame({"x": TensorArray(tensor)})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
output_array = convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY)
assert np.array_equal(output_array, input_data["x"])
def test_dict_pandas_multi_column():
array_dict = {"x": np.array([1, 2, 3]), "y": np.array([4, 5, 6])}
expected_output = pd.DataFrame({k: TensorArray(v) for k, v in array_dict.items()})
actual_output = convert_batch_type_to_pandas(array_dict)
assert expected_output.equals(actual_output)
output_dict = convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY)
for k, v in output_dict.items():
assert np.array_equal(v, array_dict[k])
def test_arrow_pandas():
df =
|
pd.DataFrame({"x": [1, 2, 3]})
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import lightgbm as lgb
import time
train_1 = pd.read_csv("dataset/validation_2/train_complete.csv")
train_2 = pd.read_csv("dataset/validation_3/train_complete.csv")
val = pd.read_csv("dataset/validation/train_complete.csv")
evaluation = False
if evaluation:
train = pd.concat([train_1, train_2])
eval_group = val.groupby('queried_record_id').size().values
else:
train =
|
pd.concat([train_1, train_2, val])
|
pandas.concat
|
"""Tests for the sdv.constraints.tabular module."""
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
        It is expected to create a new Constraint instance, receiving the names of
        the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``reverse_transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
transformed_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(transformed_data)
# Run
out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
def test___init___strict_true(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(
|
pd.DataFrame({'a': ['a', 'b', 'c']})
|
pandas.DataFrame
|
import time
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.metrics import accuracy_score,top_k_accuracy_score, mean_squared_error, mean_absolute_error
from multiprocessing import Pool
from functools import partial
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neural_network import MLPRegressor
def prediction_plot(real_val,predic_val,ax_lim_low,ax_lim_high,majr_tick,mnr_tick,ax_label):
# Plot estimated vs real values
df=
|
pd.DataFrame({'actual':real_val, 'predicted':predic_val})
|
pandas.DataFrame
|
"""Test numpy engine."""
import hypothesis.strategies as st
import pandas as pd
import pytest
from hypothesis import given
from pandera.engines import pandas_engine
from pandera.errors import ParserError
@pytest.mark.parametrize(
"data_type", list(pandas_engine.Engine.get_registered_dtypes())
)
def test_pandas_data_type(data_type):
"""Test numpy engine DataType base class."""
if data_type.type is None:
# don't test data types that require parameters e.g. Category
return
pandas_engine.Engine.dtype(data_type)
pandas_engine.Engine.dtype(data_type.type)
pandas_engine.Engine.dtype(str(data_type.type))
with pytest.warns(UserWarning):
pd_dtype = pandas_engine.DataType(data_type.type)
with pytest.warns(UserWarning):
pd_dtype_from_str = pandas_engine.DataType(str(data_type.type))
assert pd_dtype == pd_dtype_from_str
assert not pd_dtype.check("foo")
@pytest.mark.parametrize(
"data_type", list(pandas_engine.Engine.get_registered_dtypes())
)
def test_pandas_data_type_coerce(data_type):
"""
Test that pandas data type coercion raises a ParserError on failure.
"""
if data_type.type is None:
# don't test data types that require parameters e.g. Category
return
try:
data_type().try_coerce(
|
pd.Series(["1", "2", "a"])
|
pandas.Series
|
import re
import os
import time
import logging
from tqdm import tqdm
import pandas as pd
import numpy as np
from glob import glob
import torch
from transformers import BertTokenizer
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import TensorDataset, DataLoader
from ignite.engine import Engine, Events
from ignite.metrics import RunningAverage
from ignite.handlers import ModelCheckpoint, EarlyStopping, global_step_from_engine
from ignite.contrib.handlers import ProgressBar
from model import NER_Model
from utils import cmed_ner_metric
logger = logging.getLogger(__name__)
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
import random
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
(if installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class cMedNER:
def __init__(self, dataset, max_split_len=120, max_seq_len=128,
model_name_or_path="bert-base-chinese",per_gpu_batch_size=8,
embed_size=300, no_cuda=False,
dense_layer_type="linear", dropout=0.5, embed_type="random",
vector_file="", bert_lr=1e-5, crf_lr=1e-3, patience=3,
output_dir="results/cmt_ner", n_saved=3, max_epochs=100):
set_seed(42)
self.train_path = os.path.join(dataset, "train.txt")
self.dev_path = os.path.join(dataset, "dev.txt")
self.test_path = os.path.join(dataset, "test.txt")
self.max_split_len = max_split_len
self.max_seq_len = max_seq_len
self.bert_tokenizer = BertTokenizer.from_pretrained(model_name_or_path)
self.en_list = ["bod", "dis", "sym", "pro", "dru", "ite", "mic", "equ", "dep"]
self.label_list = ['<pad>', '<start>', '<end>', "O"]
for en in self.en_list:
for pre in ["B-", "I-", "E-", "S-"]:
self.label_list.append(pre + en)
self.per_gpu_batch_size = per_gpu_batch_size
self.embed_size = embed_size
self.no_cuda = no_cuda
self.dense_layer_type = dense_layer_type
self.dropout = dropout
self.embed_type = embed_type
self.model_name_or_path = model_name_or_path
self.vector_file = vector_file
self.bert_lr = bert_lr
self.crf_lr = crf_lr
self.patience = patience
self.output_dir = output_dir
self.n_saved = n_saved
self.max_epochs = max_epochs
device = torch.device("cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
self.n_gpu = max(torch.cuda.device_count() if not self.no_cuda else 1, 1)
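# Keep n_gpu at least 1 so that batch_size = n_gpu * per_gpu_batch_size stays positive on CPU-only runs.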
self.device = device
if 'bert' not in self.embed_type:
model_name = "{}_{}_crf".format(self.embed_type, self.dense_layer_type)
else:
embed_type = os.path.split(self.model_name_or_path)[-1]
model_name = "{}_{}_crf".format(embed_type, self.dense_layer_type)
self.model_name = model_name
self.output_dir = "{}/{}".format(output_dir, model_name)
def evaluation(self, gold_file, start_time, train_time):
predix = os.path.split(gold_file)[-1].replace(".txt", "")
pre_file = os.path.join(self.output_dir, '{}_{}.txt'.format(predix, self.model_name))
score_file = os.path.join(self.output_dir, 'score_{}_{}.txt'.format(predix, self.model_name))
with open(score_file, 'w', encoding="utf-8") as w:
res = cmed_ner_metric(pre_file, gold_file, self.en_list)
w.write("overall_s:\t{}".format(res['overall_s']))
w.write("\n")
w.write("{}".format(res['detial_s']))
w.write("\n")
w.write("message:\n{}".format(res['message']))
w.write("\n")
w.write("train time cost:\t {:.2f} s".format(train_time))
w.write("\n")
w.write("time cost:\t {:.2f} s".format(time.time() - start_time - train_time))
w.write("\n")
w.write("args:\n{}".format('\n'.join(['%s:%s' % item for item in self.__dict__.items()])))
def export_results(self, unlabel_path):
X, cut_his, originalTexts = self.get_X(unlabel_path)
y_pred = self.predict(X)
entity_data = []
predix = os.path.split(unlabel_path)[-1].replace(".txt", "")
X_align, y_align = originalTexts, self.alignment_X_y(originalTexts, cut_his, y_pred)
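# Decode BIES tags back into entity spans: "S-" marks a single-character entity,
# while a "B-" tag opens a span that is extended over "I-" tags and closed by an "E-" tag.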
for i, (text, y) in enumerate(tqdm(zip(X_align, y_align), desc="Decoding")):
entities = []
for k, label in enumerate(y):
if "-" in label:
tag_1 = label.split("-")[0]
tag_2 = label.split("-")[1]
## Single
if tag_1 == "S":
start_pos = k
end_pos = k + 1
entity = text[start_pos: end_pos]
en_line = "{} {} {}".format(start_pos, end_pos-1, tag_2)
entities.append(en_line)
entity_data.append((i + 1, entity, tag_2, start_pos, end_pos))
if tag_1 == "B":
start_pos = k
end_pos = k + 1
for j in range(start_pos + 1, len(y)):
if y[j] == "I-" + tag_2:
end_pos += 1
elif y[j] == 'E-' + tag_2:
end_pos += 1
break
else:
break
entity = text[start_pos: end_pos]
en_line = "{} {} {}".format(start_pos, end_pos - 1, tag_2)
entities.append(en_line)
entity_data.append((i + 1, entity, tag_2, start_pos, end_pos))
with open(os.path.join(self.output_dir, '{}_{}.txt'.format(predix, self.model_name)), 'a', encoding="utf-8") as f:
entity_text = "|||".join(entities)
s = "{}|||{}|||".format(text, entity_text)
f.write(s)
f.write("\n")
tempDF = pd.DataFrame(data=entity_data, columns=['text_id', 'entity', 'label_type', 'start_pos', 'end_pos'])
tempDF.to_csv(os.path.join(self.output_dir, "tmp_entities_{}_{}.csv".format(predix, self.model_name)), index=False)
def alignment_X_y(self, originalTexts, cut_his, y_pred):
y_align = []
for i, X in enumerate(originalTexts):
cut_index = cut_his[i]
if isinstance(cut_index, int):
y_ = y_pred[cut_index]
else:
y_ =[]
for index in cut_index:
y_.extend(y_pred[index])
assert len(X) == len(y_), 'i:{};text_len:{};while label_len:{}'.format(i, len(X), len(y_))
y_align.append(y_)
assert len(originalTexts) == len(y_align)
return y_align
def train(self):
## train data
train_X, train_y, _ = self.get_X_y(self.train_path)
train_input_ids, train_input_mask_ids, train_label_ids, train_label_mask_ids = self.get_X_y_ids(train_X, train_y)
## dev data
dev_X, dev_y, _ = self.get_X_y(self.dev_path)
dev_input_ids, dev_input_mask_ids, dev_label_ids, dev_label_mask_ids = self.get_X_y_ids(dev_X, dev_y)
train_ds = TensorDataset(train_input_ids, train_input_mask_ids, train_label_ids, train_label_mask_ids)
dev_ds = TensorDataset(dev_input_ids, dev_input_mask_ids, dev_label_ids, dev_label_mask_ids)
batch_size = self.n_gpu * self.per_gpu_batch_size
train_iter = DataLoader(train_ds, batch_size=batch_size, shuffle=True, drop_last=True)
dev_iter = DataLoader(dev_ds, batch_size=batch_size, shuffle=True, drop_last=True)
model = NER_Model(vocab_size=self.bert_tokenizer.vocab_size, embed_size=self.embed_size,
num_tags=len(self.label_list), max_len=self.max_seq_len, device=self.device,
dense_layer_type=self.dense_layer_type, dropout=self.dropout, embed_type=self.embed_type,
model_name_or_path=self.model_name_or_path, vector_file=self.vector_file)
model.to(self.device)
if self.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("model.named_parameters()")
for n, p in model.named_parameters():
logger.info(n)
parameters = [{
"params": [p for n, p in model.named_parameters() if "bert" in n],
"lr": self.bert_lr
}, {
"params": [p for n, p in model.named_parameters() if "bert" not in n],
"lr": self.crf_lr
}]
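# Two parameter groups: BERT encoder weights use the smaller bert_lr, everything else
# (CRF / dense layers) uses crf_lr; the lr passed to AdamW below is only the default
# for groups that do not set their own "lr".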
optimizer = torch.optim.AdamW(parameters, lr=self.crf_lr)
tb_writer = SummaryWriter()
def train_fn(engine, batch):
model.train()
optimizer.zero_grad()
batch = tuple(t.to(self.device) for t in batch)
labels = batch[2]
inputs = {
"token_ids": batch[0],
"input_masks": batch[1],
"label_ids": labels,
}
loss, sequence_tags = model(**inputs)
score = (sequence_tags == labels).float().detach().cpu().numpy()
condition_1 = (labels != self.label_list.index("O")).detach().cpu().numpy()
condition_2 = (labels != self.label_list.index("<pad>")).detach().cpu().numpy()
patten = np.logical_and(condition_1, condition_2)
score = score[patten].mean()
if self.n_gpu > 1:
loss = loss.mean()
## tensorboard
global_step = global_step_from_engine(engine)(engine, engine.last_event_name)
# tb_writer.add_scalar('learning_rate', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('train_loss', loss.item(), global_step)
tb_writer.add_scalar('train_score', score.item(), global_step)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
optimizer.step()
return loss.item(), score
trainer = Engine(train_fn)
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'loss')
RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'score')
def dev_fn(engine, batch):
model.eval()
optimizer.zero_grad()
with torch.no_grad():
batch = tuple(t.to(self.device) for t in batch)
labels = batch[2]
inputs = {
"token_ids": batch[0],
"input_masks": batch[1],
"label_ids": labels,
}
loss, sequence_tags = model(**inputs)
score = (sequence_tags == labels).float().detach().cpu().numpy()
condition_1 = (labels != self.label_list.index("O")).detach().cpu().numpy()
condition_2 = (labels != self.label_list.index("<pad>")).detach().cpu().numpy()
patten = np.logical_and(condition_1, condition_2)
score = score[patten].mean()
if self.n_gpu > 1:
loss = loss.mean()
## tensorboard
global_step = global_step_from_engine(engine)(engine, engine.last_event_name)
# tb_writer.add_scalar('learning_rate', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('dev_loss', loss.item(), global_step)
tb_writer.add_scalar('dev_score', score.item(), global_step)
return loss.item(), score
dev_evaluator = Engine(dev_fn)
RunningAverage(output_transform=lambda x: x[0]).attach(dev_evaluator, 'loss')
RunningAverage(output_transform=lambda x: x[1]).attach(dev_evaluator, 'score')
pbar = ProgressBar(persist=True, bar_format="")
pbar.attach(trainer, ['loss', 'score'])
pbar.attach(dev_evaluator, ['loss', 'score'])
def score_fn(engine):
loss = engine.state.metrics['loss']
score = engine.state.metrics['score']
'''
if score < 0.5:
logger.info("Too low to learn!")
trainer.terminate()
'''
return score / (loss + 1e-12)
handler = EarlyStopping(patience=self.patience, score_function=score_fn, trainer=trainer)
dev_evaluator.add_event_handler(Events.COMPLETED, handler)
@trainer.on(Events.EPOCH_COMPLETED)
def log_dev_results(engine):
dev_evaluator.run(dev_iter)
dev_metrics = dev_evaluator.state.metrics
avg_score = dev_metrics['score']
avg_loss = dev_metrics['loss']
logger.info(
"Validation Results - Epoch: {} Avg score: {:.2f} Avg loss: {:.2f}"
.format(engine.state.epoch, avg_score, avg_loss))
def model_score(engine):
score = engine.state.metrics['score']
return score
checkpointer = ModelCheckpoint(self.output_dir, "cmed_ner", n_saved=self.n_saved,
create_dir=True, score_name="model_score",
score_function=model_score,
global_step_transform=global_step_from_engine(trainer),
require_empty=False)
dev_evaluator.add_event_handler(Events.COMPLETED, checkpointer,
{self.model_name: model.module if hasattr(model, 'module') else model})
# Clear cuda cache between training/testing
def empty_cuda_cache(engine):
torch.cuda.empty_cache()
import gc
gc.collect()
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
dev_evaluator.add_event_handler(Events.COMPLETED, empty_cuda_cache)
trainer.run(train_iter, max_epochs=self.max_epochs)
def predict(self, X):
all_input_ids, all_input_mask_ids, all_label_ids, all_label_mask_ids = self.get_X_y_ids(X)
dataset = TensorDataset(all_input_ids, all_input_mask_ids, all_label_ids)
batch_size = self.n_gpu * self.per_gpu_batch_size
dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)
model = NER_Model(vocab_size=self.bert_tokenizer.vocab_size, embed_size=self.embed_size,
num_tags=len(self.label_list), max_len=self.max_seq_len, device=self.device,
dense_layer_type=self.dense_layer_type, dropout=self.dropout, embed_type=self.embed_type,
model_name_or_path=self.model_name_or_path, vector_file=self.vector_file)
model.to(self.device)
y_preds = []
for model_state_path in glob(os.path.join(self.output_dir, '*{}*.pt*'.format(self.model_name))):
model.load_state_dict(torch.load(model_state_path))
y_pred = self.single_predict(model, dataloader)
y_preds.append(y_pred)
y_preds = torch.tensor(y_preds)
y_pred = torch.mode(y_preds, dim=0).values
y_pred = y_pred.numpy()
preds_list = [[] for _ in range(all_label_mask_ids.shape[0])]
for i in range(all_label_mask_ids.shape[0]):
for j in range(all_label_mask_ids.shape[1]):
if all_label_mask_ids[i, j] != -100:
preds_list[i].append(self.label_list[y_pred[i][j]])
return preds_list
def single_predict(self, model, dataloader):
if self.n_gpu > 1:
model = torch.nn.DataParallel(model)
model.eval()
preds = None
with torch.no_grad():
for batch in tqdm(dataloader, desc="Predicting"):
batch = tuple(t.to(self.device) for t in batch)
inputs = {
"token_ids": batch[0],
"input_masks": batch[1],
}
_, sequence_tags = model(**inputs)
sequence_tags = sequence_tags.detach().cpu().numpy()
if preds is None:
preds = sequence_tags
else:
preds = np.append(preds, sequence_tags, axis=0)
return preds
def get_X_y_ids(self, X, y=None):
all_input_ids = []
all_label_ids = []
all_input_mask_ids = []
all_label_mask_ids = []
for i, X_ in enumerate(tqdm(X, desc="Tokens to ids")):
text = list(map(str.lower, X_))
input_ids = self.bert_tokenizer.encode(text=text)
input_mask_ids = [1] * len(input_ids)
padding_len = self.max_seq_len - len(input_ids)
input_ids += [self.bert_tokenizer.pad_token_id] * padding_len
input_mask_ids += [0] * padding_len
try:
y_ = ['<start>'] + y[i] + ['<end>']
y_ += ['<pad>'] * padding_len
label_mask_id = [-100] + [100] * len(y[i]) + [-100]
label_mask_id += [-100] * padding_len
except:
y_ = ['<start>', '<end>'] + ['<pad>'] * (self.max_seq_len - 2)
label_mask_id = [-100 if idx in [
self.bert_tokenizer.pad_token_id,
self.bert_tokenizer.cls_token_id,
self.bert_tokenizer.sep_token_id] else 100 for idx in input_ids]
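# Positions marked -100 ([CLS], [SEP] and padding) are ignored when decoding predictions;
# see predict(), which only keeps positions where the label mask is not -100.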
label_ids = list(map(self.label_list.index, y_))
assert len(input_ids) == len(input_mask_ids) == len(label_ids) == len(label_mask_id) == self.max_seq_len
all_input_ids.append(input_ids)
all_input_mask_ids.append(input_mask_ids)
all_label_ids.append(label_ids)
all_label_mask_ids.append(label_mask_id)
if i == 0:
logger.info("tokens:\n{}".format(text))
logger.info("token_ids: \n{}".format(input_ids))
logger.info("labels:\n{}".format(y_))
logger.info("label_ids: \n{}".format(label_ids))
all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
all_input_mask_ids = torch.tensor(all_input_mask_ids, dtype=torch.long)
all_label_ids = torch.tensor(all_label_ids, dtype=torch.long)
all_label_mask_ids = torch.tensor(all_label_mask_ids, dtype=torch.long)
return all_input_ids, all_input_mask_ids, all_label_ids, all_label_mask_ids
def get_X_y(self, file_path):
X = []
y = []
flag = 0
entity_data = []
with open(file_path, 'r', encoding="utf-8") as reader:
for i, line in enumerate(tqdm(reader.readlines(), desc="Read {}".format(file_path))):
line_list = line.split("|||")
line_list.pop()
originalText = line_list.pop(0)
text = self.clean_text(originalText)
entities = line_list
if len(text) < self.max_split_len:
X_ = list(text)
y_ = ['O'] * len(X_)
for entity in entities:
en_list = entity.split(" ")
start_pos = int(en_list[0])
end_pos = int(en_list[1]) + 1
tag = en_list[2]
if end_pos - start_pos > 1:
y_[start_pos] = 'B-' + tag
for i in range(start_pos+1, end_pos-1):
y_[i] = 'I-' + tag
y_[end_pos - 1] = 'E-' + tag
else:
y_[start_pos] = 'S-' + tag
entity_data.append((text[start_pos: end_pos], tag))
X.append(X_)
y.append(y_)
else:
# split text
dot_index_list = self.get_dot_index(text)
X_list, y_list, entity_data_ = self.get_short_text_label(text, dot_index_list, entities)
assert len(text) == sum(map(len, X_list))
if flag < 3:
logger.info("full text:\n{}".format(text))
X_list_str = list(map("".join, X_list))
logger.info("short texts:\n{}".format("\n".join(X_list_str)))
flag += 1
X.extend(X_list)
y.extend(y_list)
entity_data.extend(entity_data_)
vocab_df = pd.DataFrame(data=entity_data, columns=['entity', 'label_type'])
vocab_df.drop_duplicates(inplace=True, ignore_index=True)
assert len(X) == len(y)
return X, y, vocab_df
def get_X(self, unlabeled_file):
X = []
cut_his = {}
originalTexts = []
print_flag = 0
with open(unlabeled_file, 'r', encoding='utf-8') as f:
for text_id, line in enumerate(tqdm(f.readlines(), desc="Reading {}".format(unlabeled_file))):
line_list = line.split("|||")
originalText = line_list.pop(0)
originalTexts.append(originalText)
text = self.clean_text(originalText)
if len(text) < self.max_split_len:
X.append(list(text))
cut_his[text_id] = len(X) - 1
else:
# split text
dot_index_list = self.get_dot_index(text)
flag = 0
text_id_list = []
if print_flag < 3:
logger.info("full text:\n{}".format(text))
for i, do_index in enumerate(dot_index_list):
short_text = text[flag: do_index + 1]
if print_flag < 3:
logger.info("short texts:\n{}".format(short_text))
# print("Short text:{}".format(short_text))
X_ = list(short_text)
X.append(X_)
text_id_list.append(len(X) - 1)
flag = do_index + 1
print_flag += 1
cut_his[text_id] = text_id_list
return X, cut_his, originalTexts
def get_short_text_label(self, text, dot_index_list, entities):
X = []
y = []
flag = 0
entity_data = []
for i, dot_index in enumerate(dot_index_list):
short_text = text[flag : dot_index+1]
X_ = list(short_text)
y_ = ["O"] * len(X_)
for entity in entities:
en_list = entity.split(" ")
start_pos = int(en_list[0])
end_pos = int(en_list[1]) + 1
tag = en_list[2]
k = start_pos - flag
en_list = []
if end_pos - start_pos > 1:
if k >= 0 and k < len(y_):
y_[k] = 'B-' + tag
en_list.append(X_[k])
for j in range(start_pos + 1, end_pos - 1):
j = j - flag
if j >= 0 and j < len(y_):
y_[j] = 'I-' + tag
en_list.append(X_[j])
e = end_pos - 1 - flag
if e >= 0 and e < len(y_):
y_[e] = 'E-' + tag
en_list.append(X_[e])
else:
if k >= 0 and k < len(y_):
y_[k] = 'S-' + tag
en_list.append(X_[k])
if len(en_list) > 0:
entity_data.append(("".join(en_list), tag))
flag = dot_index + 1
X.append(X_)
y.append(y_)
return X, y, entity_data
def get_dot_index(self, text):
flag = 0
text_ = text
dot_index_list = []
while (len(text_) > self.max_split_len):
text_ = text_[:self.max_split_len]
index_list = []
for match in re.finditer("[,|,|;|;|。|、]", text_):
index_list.append(match.span()[0])
index_list.sort()
if len(index_list) > 1:
last_dot = index_list.pop()
else:
last_dot = len(text_)
dot_index_list.append(last_dot + flag)
text_ = text[(last_dot + flag) :]
flag += last_dot
dot_index_list.append(len(text))
return dot_index_list
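# Editor's note: get_dot_index returns absolute indices of punctuation-based split points
# so that each chunk handed to the tokenizer stays at roughly max_split_len characters;
# the final element is len(text), so the tail of the text is always covered.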
def clean_text(self, text):
def special2n(string):
string = string.replace(r"\n", "")
return re.sub("[ |\t|\r|\n|\\\|\u0004]", "_", string)
def strQ2B(ustr):
"全角转半角"
rstr = ""
for uchar in ustr:
inside_code = ord(uchar)
# Full-width space is converted directly to a half-width space
if inside_code == 12288:
inside_code = 32
# Other full-width characters (except space) are converted by the fixed code-point offset
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstr += chr(inside_code)
return rstr
return strQ2B(special2n(text)).lower()
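# Illustrative example (editor's sketch): clean_text("ＡＢＣ\t１２３") == "abc_123" --
# special2n replaces whitespace/control characters with "_", strQ2B folds full-width
# characters to their half-width ASCII equivalents, and the result is lowercased.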
def explore_dataset(self, file_path):
text_list = []
entity_list = []
with open(file_path, 'r', encoding="utf8") as reader:
for i, line in enumerate(tqdm(reader.readlines(), desc="Read {}".format(file_path))):
line_list = line.split("|||")
line_list.pop()
text = line_list.pop(0)
text_list.append(text)
for en_l in line_list:
en_l = en_l.split(" ")
try:
start_pos = int(en_l[0])
except ValueError:
print(i+1, line)
end_pos = int(en_l[1]) + 1
label_type = en_l[2]
entity_list.append((label_type, text[start_pos: end_pos]))
text_df = pd.DataFrame(data=text_list, columns=['text'])
entity_df =
|
pd.DataFrame(data=entity_list, columns=['label_type', 'entity'])
|
pandas.DataFrame
|
import json
import requests
from typing import List
import pandas as pd
from utils import request_to_json, get_repo_names
class GitHubUsers:
"""
Get information about contributors and contributions.
"""
def get_repo_contributors(self, repo: str = None) -> json:
"""
Get specific repository contributors.
Args:
repo (str, optional): Repository name. Defaults to None.
Returns:
json: json with all information about contributors.
"""
url = f"https://api.github.com/repos/dyvenia/{repo}/contributors"
req = requests.get(url)
if req.status_code != 200:
return {}
return request_to_json(url)
def get_all_contributions(self, repos: List[str] = None) -> pd.DataFrame:
"""
Get all contributions for list of repos for all contributors.
Args:
repos (List[str], optional): List of repository names. Defaults to None.
Returns:
pd.DataFrame: DataFrame with one row per (repo, contributor) pair and the contribution count.
"""
dfs = []
for repo in repos:
contributors = self.get_repo_contributors(repo=repo)
contrib_dict = {}
contributor_list = []
for contrib in contributors:
if "[bot]" not in contrib["login"]:
contrib_dict = {
"repo": repo,
"login": contrib["login"],
"contributions": contrib["contributions"],
}
contributor_list.append(contrib_dict)
df_dict =
|
pd.DataFrame(contributor_list)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import json
import csv
import argparse
import re
from collections import OrderedDict
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Converts Covid-19 Data \
Tables provided by the RKI to a simpler format \
to fit the model")
parser.add_argument(
"--source",
nargs=1,
dest="input_csv",
default="../data/raw/COVID19.csv",
help="provide the source csv table")
parser.add_argument(
"--destination",
nargs=1,
dest="output_csv",
default="../data/diseases/covid19.csv",
help="provide the destination file")
args = parser.parse_args()
counties = OrderedDict()
with open("../data/raw/germany_county_shapes.json", "r") as data_file:
shape_data = json.load(data_file)
for idx, val in enumerate(shape_data["features"]):
id_current = val["properties"]["RKI_ID"]
name_current = val["properties"]["RKI_NameDE"]
counties[name_current] = id_current
covid19_data = pd.read_csv(args.input_csv, sep=',')
# this complicated procedure removes timezone information.
regex = re.compile(r"([0-9]+)-([0-9]+)-([0-9]+)T.*")
start_year, start_month, start_day = regex.search(
covid19_data['Meldedatum'].min()).groups()
end_year, end_month, end_day = regex.search(
covid19_data['Meldedatum'].max()).groups()
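# Editor's note: e.g. a Meldedatum value like "2020-03-21T00:00:00+01:00" yields the
# groups ("2020", "03", "21"); only the date part is kept, dropping the timezone.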
start_date = pd.Timestamp(
int(start_year), int(start_month), int(start_day))
end_date = pd.Timestamp(int(end_year), int(end_month), int(end_day))
dates = [day for day in
|
pd.date_range(start_date, end_date)
|
pandas.date_range
|
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(
|
pd.Series(dtype="bool")
|
pandas.Series
|
#! /user/bin/evn python
# -*- coding:utf8 -*-
"""
@Author : <NAME>
@Contact : <EMAIL>
@Project : MVLSTM
@File : preprocess.py
@Time : 18-11-13 11:14 AM
@Software : PyCharm
@Copyright: "Copyright (c) 2018 <NAME>. All Rights Reserved"
"""
import pandas as pd
import random
import codecs
import json
excel_path = './question.xls'
question_bd = './paragraph_column.txt'
qq_path_bd = 'q2q_pair_bd.txt'
qq_path_tk = './q2q_pair_tk.txt'
question_tk = './question.csv'
test_path = './testset.txt'
primary_question_dict_path_tk = './primary_question_dict_tk.json'
primary_question_dict_path_bd = './primary_question_dict_bd.json'
front_noise = './front_noise.txt'
end_noise = './end_noise.txt'
def load_noise(front_path, end_path):
"""
Load the front and end noise
:param front_path:
:param end_path:
:return: List: front_list, end_list
"""
front_list = []
end_list = []
with open(front_path, 'r', encoding='utf-8') as front_f:
while True:
line = front_f.readline()
if not line:
print('Front noise phrase load finished!')
break
front_list.append(line.replace('\n', ''))
with open(end_path, 'r', encoding='utf-8') as end_f:
while True:
line = end_f.readline()
if not line:
print('End noise phrase load finished!')
return front_list, end_list
end_list.append(line.replace('\n', ''))
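# Usage sketch (editor's note): front_list, end_list = load_noise(front_noise, end_noise)
# returns two lists of noise phrases that are later prepended/appended to questions
# to build noisy positive training pairs.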
def txt2QQpair_tune(path, out_path, front_path='./front_noise.txt', end_path='./end_noise.txt'):
"""
input: txt file with tab-separated columns: question_id, question, primary_question_id
transform the txt data to Question-Question pairs
:return: tag, question, question
"""
# load noise prefix and end_fix
front_list, end_list = load_noise(front_path, end_path)
# load primary question
primary_question_dict = {}
with open(path, 'r', encoding='utf-8') as bd_f:
while True:
line = bd_f.readline()
if not line:
print('Primary question dict construct successfully!')
break
try:
temp_data = (line.replace('\n', '')).strip().split('\t')
if len(temp_data) != 3:
continue
temp_id = int(temp_data[0])
temp_context = temp_data[1]
temp_pid = int(temp_data[2])
if not temp_context.strip():
continue
if temp_pid == 0:
primary_question_dict[temp_id] = temp_context.replace('\n', '') # key: id, value: question
except Exception as e:
print(line)
print(e)
primary_question_dict_json = json.dumps(primary_question_dict, ensure_ascii=False)
with open(primary_question_dict_path_bd, 'w', encoding='utf-8') as pqd_f:
pqd_f.write(primary_question_dict_json)
# end of load primary question
# construct question to question pair
questions1 = []
questions2 = []
flags = []
with open(path, 'r', encoding='utf-8') as bd_f:
while True:
line = bd_f.readline()
# print(len(flags))
if len(flags) >= 200000:
break
if not line:
print('question to question matching data construct successfully')
break
try:
temp_data = (line.replace('\n', '')).strip().split('\t')
if len(temp_data) != 3:
continue
temp_id = int(temp_data[0])
temp_context = temp_data[1]
temp_pid = int(temp_data[2])
if not temp_context.strip():
continue
if temp_pid != 0:
if len(flags) < 150000:
temp_context_noise = random.choice(front_list) + temp_context + random.choice(end_list)
questions1.append(temp_context_noise.replace('\n', ''))
questions2.append(primary_question_dict[temp_pid])
flags.append(1)
# else:
# temp_context_noise = temp_context
# questions1.append((temp_context.replace('\n', '')))
# also add the original (noise-free) question text
questions1.append(temp_context.replace('\n', ''))
questions2.append(primary_question_dict[temp_pid])
flags.append(1)
temp_dict = primary_question_dict.copy()
primary_id_raw = list(temp_dict.keys())
# negative sampling: three random non-matching primary questions per question
primary_id_raw.remove(temp_pid)
fake_id = random.choice(primary_id_raw)
questions1.append(temp_context.replace('\n', ''))
questions2.append(primary_question_dict[fake_id])
flags.append(0)
primary_id_raw.remove(fake_id)
fake_id = random.choice(primary_id_raw)
questions1.append(temp_context.replace('\n', ''))
questions2.append(primary_question_dict[fake_id])
flags.append(0)
primary_id_raw.remove(fake_id)
fake_id = random.choice(primary_id_raw)
questions1.append(temp_context.replace('\n', ''))
questions2.append(primary_question_dict[fake_id])
flags.append(0)
questions1.append(temp_context.replace('\n', ''))
questions2.append(temp_context.replace('\n', ''))
flags.append(1)
except Exception as e:
print(line)
print(e)
with codecs.open(out_path, 'w', encoding='utf-8') as qq:
for flag, q1, q2 in zip(flags, questions1, questions2):
if q1 and q2:
qq.write(str(flag) + '\t' + str(q1) + '\t' + str(q2) + '\n')
def csv2QQpair_tune(path, out_path, front_path='./front_noise.txt', end_path='./end_noise.txt'):
"""
input: csv file with tab-separated columns: question_id, question, primary_question_id
transform the csv data to Question-Question pairs
:return: tag, question, question
"""
# load noise prefix and end_fix
front_list, end_list = load_noise(front_path, end_path)
primary_question_dict = {}
# suitable for csv file only
csv_data = pd.read_csv(path, sep='\t', header=None, index_col=0)
for key, data in csv_data.iterrows():
if not (data[1].strip() or data[2].strip()):
continue
if data[2] == 0:
primary_question_dict[key] = (data[1]).replace('\n', '') # key: id, value: question
primary_question_dict_json = json.dumps(primary_question_dict, ensure_ascii=False)
with open(primary_question_dict_path_tk, 'w', encoding='utf-8') as pqd_f:
pqd_f.write(primary_question_dict_json)
questions1 = []
questions2 = []
flags = []
# suitable for csv file only
for key, data in csv_data.iterrows():
if not (data[1].strip() or data[2].strip()):
continue
temp_context = (data[1]).replace("\n", "")
if data[2] != 0:
if len(flags) < 100000:
temp_context_noise = random.choice(front_list) + temp_context + random.choice(end_list)
# True
questions1.append(temp_context_noise)
questions2.append(primary_question_dict[data[2]])
flags.append(1)
# else:
# temp_context_noise = temp_context
# also add the original (noise-free) question text
questions1.append(temp_context.replace('\n', ''))
questions2.append(primary_question_dict[data[2]])
flags.append(1)
temp_dict = primary_question_dict.copy() # shallow copy to avoid modifying the primary question dict
# dict.keys() returns a dict_keys view, which behaves like a set rather than a list, so it cannot be indexed; convert it to a list first
primary_id_raw = list(temp_dict.keys())
# negative sampling: three random non-matching primary questions per question
primary_id_raw.remove(data[2]) # drop this question's own primary id before random negative sampling
fake_id = random.choice(primary_id_raw)
questions1.append(temp_context)
questions2.append(primary_question_dict[fake_id])
flags.append(0)
primary_id_raw.remove(fake_id)
fake_id = random.choice(primary_id_raw)
questions1.append(temp_context)
questions2.append(primary_question_dict[fake_id])
flags.append(0)
primary_id_raw.remove(fake_id)
fake_id = random.choice(primary_id_raw)
questions1.append(temp_context)
questions2.append(primary_question_dict[fake_id])
flags.append(0)
questions1.append(temp_context.replace('\n', ''))
questions2.append(temp_context.replace('\n', ''))
flags.append(1)
with codecs.open(out_path, 'w', encoding='utf-8') as qq:
for flag, q1, q2 in zip(flags, questions1, questions2):
if q1 and q2:
qq.write(str(flag) + '\t' + str(q1) + '\t' + str(q2) + '\n')
def excel2csv(path, out_path):
"""
Multi-Sheets excel file, needs to be convert to one file
:param path: str
:param out_path: str
:return:
"""
io = pd.io.excel.ExcelFile(path)
excel_data = pd.read_excel(io,
sheet_name=['question', 'question(2)', 'question(3)'],
# sheet_name=['Sheet1', 'Sheet2', 'Sheet3'],
usecols=[0, 1, 2], # 0: id, 1: question, 2:parent_id
index_col=0,
header=None)
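# pd.read_excel with a list of sheet names returns a dict of DataFrames keyed by sheet
# name, which is why the three sheets are concatenated into a single frame below.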
csv_df =
|
pd.concat([excel_data['question'], excel_data['question(2)'], excel_data['question(3)']])
|
pandas.concat
|
#!/usr/bin/env python3
import sys
import os
import argparse
import subprocess
import readline
import pandas as pd
import numpy as np
from plio.io.io_bae import read_gpf, save_gpf
from appl_tools.pedr import pedrtab2df
from appl_tools.surfacefit import run_pc_align, update_gpf, ascii_dtm2csv
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description = """This script aligns Tie Points from a Socet Set/Socet GXP Ground Point File (GPF) to a reference elevation data set
by acting as a thin wrapper around the 'pc_align' program from the NASA Ames Stereo Pipeline.
Typically, Tie Points are too sparse to be reliably aligned to a reference using the iterative closest points algorithm in pc_align. Therefore, this script allows pc_align
to first be applied to a (potentially low-resolution) digital terrain model derived from the stereopair that corresponds to the GPF file.
The resulting transformation matrix is applied to the Tie Points during a second call to pc_align.
The transformed latitude, longitude, and height values from the Tie Points are then written to a new GPF with their sigmas set equal to 1 and the "known" flag
changed from "1" (Tie Point) to "3" (XYZ Control). Non-Tie Points from the original GPF are written to the new GPF with their "known" flags changed to "1."
Tie Points from the original GPF that were not active ("stat" = 0) are copied "as-is" into the new GPF. The output GPF preserves the order of the ground points from the original GPF.
If it is desired to update all active points in the input GPF, use the '--all-points' flag. The modified points will still have their "known" flag set to "3" (XYZ Control) in the
output GPF.
The Ames Stereo Pipeline program pc_align must be available in the user's path or somewhere else where Python can find it.
More information about the Ames Stereo Pipeline is available on the project's Git repository: https://github.com/NeoGeographyToolkit/StereoPipeline""",
epilog = """EXAMPLES:
Align Tie Points from a HiRISE DTM in Socet's ASCII DTM format to a reference DTM in GeoTIFF format.
The Socet ASCII DTM will be automatically converted to a pc_align-compatible CSV, but the user should specify either the datum or planetary radii that describe the
surface that the Socet DTM's heights are referenced to. In the following example, the Socet DTM is referenced to the Mars 2000 ellipsoid.
%(prog)s --radii 3396190 3376000 --max-displacement 50 CTX_reference_dtm.tif HiRISE_Gale_low_res.asc HiRISE_Gale.gpf output_HiRISE_Gale.gpf
This script can be used to simulate the behavior of the legacy SurfaceFit Perl script wherein Socet Tie Points from a Mars DTM referenced to an ellipsoid
are aligned to MOLA shot data referenced to the geoid, the "--max-displacement" parameter was fixed at 300 meters and the datum was set to the IAU sphere ("D_MARS"):
%(prog)s --max-displacement 300 --datum D_MARS MOLA_reference.tab CTX_NE_Syrtis_low_res_aate.asc CTX_NE_Syrtis.gpf tfm_CTX_NE_Syrtis.gpf \n
""")
parser.add_argument("ref_dtm",
help="The name of the file that contains the reference elevation data.")
parser.add_argument("socet_dtm",
help = "The name of the file containing the Socet Set or GXP DTM to be aligned. Must be in ASCII format.")
parser.add_argument("socet_gpf",
help = "The name of the Socet Ground Point File that will be updated using the transform that was calculated for socet_dtm.")
parser.add_argument("tfm_socet_gpf",
help = """Name to use for the output (transformed) ground point file. Must include ".gpf" extension.""")
parser.add_argument("--all-points",
action='store_true',
help = "This flag will force updating of all active (stat = 1) points in socet_gpf, not just tie points (known = 0).")
parser.add_argument("--s_srs",
help = """PROJ string describing the projected spatial reference system of the input GPF. If omitted, script assumes a geographic SRS with shape defined by --datum or --radius. If ref_dtm is CSV, it must use same SRS as the GPF file.""",
nargs='?',
type=str)
parser.add_argument("--gxp",
action='store_true',
help = "Flag to indicate input GPF is in Socet GXP format. Output GPF will be in legacy Socet Set format.")
parser.add_argument("--max-displacement",
type=float,
nargs=1,
help="Maximum expected displacement of source points as result of alignment, in meters (after the initial guess transform is applied to the source points). Used for removing gross outliers in the source (movable) pointcloud.")
refshape = parser.add_mutually_exclusive_group(required=True)
refshape.add_argument("--datum",
nargs=1,
choices=['D_MARS', 'D_MOON', 'MOLA', 'NAD27', 'NAD83', 'WGS72', 'WGS_1984'],
help = """Use this datum for heights in the input GPF file and any other input CSV files.""")
refshape.add_argument("--radii",
nargs=2,
metavar=('semi-major-axis','semi-minor-axis'),
type=float,
help="""Semi-major and semi-minor axes, expressed in meters, that define the ellipsoid that heights in the input GPF file and any other input CSV files are referenced to.""")
parser.add_argument('pc_align_args',
nargs = argparse.REMAINDER,
help = """Additional arguments that will be passed directly to pc_align.""")
args = parser.parse_args()
return args
def main(user_args):
ref_dtm = user_args.ref_dtm
socet_dtm = user_args.socet_dtm
socet_gpf = user_args.socet_gpf
tfm_socet_gpf = user_args.tfm_socet_gpf
all_points = user_args.all_points
datum = user_args.datum
radii = user_args.radii
max_displacement = user_args.max_displacement
pc_align_args = user_args.pc_align_args
s_srs = user_args.s_srs
gxp = user_args.gxp
if os.path.splitext(tfm_socet_gpf)[1] != ".gpf":
print("""USER ERROR: Output file name must use ".gpf" extension""")
sys.exit(1)
ref_basename = os.path.splitext(ref_dtm)[0]
ref_ext = os.path.splitext(ref_dtm)[1]
socet_dtm_basename = os.path.splitext(socet_dtm)[0]
src_ext = os.path.splitext(socet_dtm)[1]
if ref_ext.lower() == '.tab':
print("\n\n *** WARNING: Using MOLA heights above geoid ***\n\n")
ref_dtm_pc_align = (ref_basename + "_RefPC.csv")
pedr_df = pedrtab2df(ref_dtm)
# Assume planetographic latitude (areod_lat), longitude (long_East), and topography columns exist
pedr_df.to_csv(path_or_buf=(ref_dtm_pc_align), header=False, index=False,
columns=['areod_lat','long_East','topography'])
elif ref_ext.lower() == '.asc':
ref_dtm_pc_align = (ref_basename + "_RefPC.csv")
ascii_dtm2csv(ref_dtm, ref_dtm_pc_align)
elif ref_ext.lower() == '.csv':
ref_dtm_pc_align = ref_dtm
else:
# assume raster and let pc_align complain if it's not
ref_dtm_pc_align = ref_dtm
if src_ext.lower() == '.asc':
socet_dtm_pc_align = (socet_dtm_basename + ".csv")
ascii_dtm2csv(socet_dtm,socet_dtm_pc_align)
else:
# assume raster and let pc_align complain if it's not
socet_dtm_pc_align = socet_dtm
# Read in the Socet ground point file using plio's read_gpf()
if gxp:
gpf_df = read_gpf(socet_gpf, gxp=True)
# Modify DataFrame to resemble legacy Socet Set format
# Rename "use" and "point_type" to their Socet Set equivalents
gpf_df.rename(columns={'use':'stat', 'point_type':'known'}, inplace=True)
if not s_srs:
gpf_df.lat_Y_North = np.radians(gpf_df['lat_Y_North'])
gpf_df.long_X_East = np.radians(((gpf_df['long_X_East'] + 180) % 360) - 180)
else:
gpf_df = read_gpf(socet_gpf)
# Set the index of the GPF dataframe to be the point_id column
gpf_df.set_index('point_id', drop=False, inplace=True)
# If user passed "--all-points" option, copy *all active* points to new data frame
# Otherwise, copy active tie points (point_type == 0) only
# Note that DataFrame is named "tp_df" regardless of whether it includes only tiepoints or not
if all_points:
tp_df = gpf_df[(gpf_df.stat == 1)].copy()
else:
tp_df = gpf_df[(gpf_df.known == 0) & (gpf_df.stat == 1)].copy()
if not s_srs:
tp_df.lat_Y_North = np.degrees(tp_df.lat_Y_North)
tp_df.long_X_East = ((360 + np.degrees(tp_df.long_X_East)) % 360)
align_prefix = (socet_dtm_basename + '_pcAligned_DTM')
gpf_align_prefix = (socet_dtm_basename + '_pcAligned_gpfTies')
# Build arguments list and perform alignment with pc_align
align_args = ["--max-displacement", str(max_displacement[0]),
"--save-inv-transformed-reference-points",
"-o", align_prefix]
# Extend the list of arguments for pc_align to include the datum or radii as necessary
if datum is not None:
align_args.extend(["--datum", str(datum[0])])
elif radii is not None:
align_args.extend(["--semi-major-axis", str(radii[0]), "--semi-minor-axis", str(radii[1])])
# If the user passed additional arguments for pc_align, extend align_args to include them
if pc_align_args:
align_args.extend(pc_align_args)
# Extend the list to place point clouds at the end of the list of arguments for pc_align
align_args.extend([socet_dtm_pc_align, ref_dtm_pc_align])
print("Aligning " + socet_dtm_pc_align + " to " + ref_dtm_pc_align)
try:
run_align = run_pc_align(align_args)
except subprocess.CalledProcessError as e:
print(e)
sys.exit(1)
# Write out CSV (compatible with pc_align) containing lat/long/height of points to be updated
socet_gpf_csv = ((os.path.splitext(socet_gpf)[0]) + '.csv')
tp_df.to_csv(path_or_buf=socet_gpf_csv,
header=False,
index=False,
columns=['lat_Y_North','long_X_East','ht'])
# Build arguments list and apply transformation to selected points from GPF using pc_align
# Set num-iterations = 0 because only going to apply existing transform
transform_matrix = (align_prefix + '-inverse-transform.txt')
apply_tfm_args = ["--initial-transform",transform_matrix,
"--num-iterations","0",
"--max-displacement", str(max_displacement[0]),
"--save-transformed-source-points",
"-o", gpf_align_prefix ]
# Extend the list of arguments for pc_align to include the datum or radii as necessary
if datum is not None:
apply_tfm_args.extend(["--datum", str(datum[0])])
elif radii is not None:
apply_tfm_args.extend(["--semi-major-axis", str(radii[0]), "--semi-minor-axis", str(radii[1])])
if s_srs:
apply_tfm_args.extend(["--csv-proj4", str(s_srs)])
apply_tfm_args.extend(["--csv-format", str('''2:easting 1:northing 3:height_above_datum''')])
# Extend the list to place point clouds at the end of the list of arguments for pc_align
apply_tfm_args.extend([ref_dtm_pc_align,socet_gpf_csv])
# Apply transform from previous pc_align run to tie points CSV
print("Calling pc_align with 0 iterations to apply transform from previous run to Tie Points from GPF")
try:
run_align = run_pc_align(apply_tfm_args)
except subprocess.CalledProcessError as e:
print(e)
sys.exit(1)
# mergeTransformedGPFTies
# Convert the transformed tie points from CSV to a pandas DataFrame
t = np.genfromtxt((gpf_align_prefix + '-trans_source.csv'),delimiter=',',
skip_header=3,dtype='unicode')
id_list = tp_df['point_id'].tolist()
tfm_index =
|
pd.Index(id_list)
|
pandas.Index
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 23:11:56 2017
@author: Flamingo
"""
import pandas as pd
import numpy as np
import datetime
import copy
import sys
sys.path.append('../TOOLS')
from IJCAI2017_TOOL import *
#%% read in holiday calendar
HOLI = pd.read_csv('../additional/HOLI.csv')
HOLI = HOLI.set_index(['DATE'],drop = True)
HOLI_TAB = HOLI.transpose()
HOLI_TAB.columns = [str((datetime.datetime.strptime('20150626','%Y%m%d') + datetime.timedelta(days=x)).date()) for x in range( HOLI_TAB.shape[1])]
#%% read in user pay / view data
PAYNW = pd.read_csv('../data/user_pay_new.csv')
VIENW = pd.read_csv('../data/user_view_new.csv')
PAYNW_SHOP_DATE = PAYNW.groupby(['SHOP_ID','DATE'],as_index = False).sum()
PAYNW_SHOP_DATE = PAYNW_SHOP_DATE[['SHOP_ID','DATE','Num_post']]
#PAYNW_TAB_FIX = pd.read_csv('FillOctober.csv')
#PAYNW_TAB_FIX['DATE'] = [ (lambda x:str(datetime.datetime.strptime('2015/06/26','%Y/%m/%d').date() ) ) (x) for x in PAYNW_TAB_FIX['DATE']]
#
#PAYNW_SHOP_DATE = pd.concat([PAYNW_SHOP_DATE ,PAYNW_TAB_FIX],axis = 0)
#
#
#PAYNW_SHOP_DATE = PAYNW_SHOP_DATE.drop_duplicates(subset = ['SHOP_ID','DATE'], keep = 'last')
#PAYNW_SHOP_DATE = PAYNW_SHOP_DATE.sort_values(by = ['SHOP_ID','DATE'])
PAYNW_SHOP_DATE.reset_index(level=0)
PAYNW_TAB = pd.pivot_table(PAYNW_SHOP_DATE, values=['Num_post'], index=['SHOP_ID'],columns=['DATE'], aggfunc=np.sum)
#PAYNW_TAB = pd.pivot_table(PAYNW, values=['Num_post'], index=['SHOP_ID'],columns=['DATE'], aggfunc=np.sum)
PAYNW_TAB = pd.concat( [PAYNW_TAB[PAYNW_TAB.columns[0:169:1]], pd.DataFrame({'A':[np.nan],},index=np.arange(1,2001)),PAYNW_TAB[PAYNW_TAB.columns[169::1]] ], axis = 1)
PAYNW_TAB.columns = [str((datetime.datetime.strptime('20150626','%Y%m%d') + datetime.timedelta(days=x)).date()) for x in range( PAYNW_TAB.shape[1])]
PAYNW_TAB['2015-12-12'] = PAYNW_TAB['2015-12-13']
PAYNW_TAB_T = PAYNW_TAB.transpose()
#%% shop_related_features
SHOP_INFO = pd.read_csv("../external/SHOP_FEATURES_0221.csv",low_memory=False)
SHOP_SC = ['SC00']
SHOP_SD = ['SD' + str(x).zfill(2) for x in np.arange(5)]
SHOP_SE = ['SE' + str(x).zfill(2) for x in np.arange(1)]
SHOP_SF = ['SF' + str(x).zfill(2) for x in np.arange(1)]
SHOP_SG = ['SG' + str(x).zfill(2) for x in np.arange(4)]
SHOP_SH = ['SH' + str(x).zfill(2) for x in np.arange(2)]
SHOP_SI = ['SI' + str(x).zfill(2) for x in range(10)]
SHOP_SJ = ['SJ' + str(x).zfill(2) for x in np.arange(15)]
SHOP_columns = SHOP_SC + SHOP_SD + SHOP_SE + SHOP_SF + SHOP_SG + SHOP_SH + SHOP_SI + SHOP_SJ
#%%
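# Window lengths: 21-day training block and 14-day test block; TST_PAD_N adds 4 extra days of padding.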
TRN_N = 21
TST_N = 14
TST_PAD_N = 14 + 4
end_date = datetime.datetime.strptime('2016-10-31','%Y-%m-%d')
day_N = 494
date_list = [str((end_date- datetime.timedelta(days=x)).date()) for x in range(day_N)]
date_list.reverse()
#%%
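# Slide the (train, test) window across the 494-day range and stack one block per offset:
# 494 - (21 + 14) + 1 = 460 windows in total.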
TRAIN = pd.DataFrame()
train_date_zip = list(zip(date_list[0:day_N-(TRN_N+TST_N)+1], date_list[TRN_N-1:day_N-TST_N+1], date_list[TRN_N:day_N-TST_N+2], date_list[TRN_N+TST_N-1:day_N]))
train_date_zip_df = pd.DataFrame(train_date_zip)
train_date_zip_df.columns = ['TRN_STA','TRN_END','TST_STA','TST_END']
for TRN_STA,TRN_END,TST_STA,TST_END in train_date_zip:
TRAIN_temp = PAYNW_TAB.loc[:,TRN_STA:TST_END]
TRAIN_temp.columns = np.arange(TRAIN_temp.shape[1])
TRAIN_temp.reset_index(level=0, inplace=True)
TRAIN_temp.loc[:,'TRN_STA'] = str(TRN_STA)
TRAIN_temp.loc[:,'TRN_END'] = str(TRN_END)
TRAIN_temp.loc[:,'TST_STA'] = str(TST_STA)
TRAIN_temp.loc[:,'TST_END'] = str(TST_END)
TRAIN = pd.concat( [TRAIN,TRAIN_temp],)
#%%
TRAIN = TRAIN.reset_index(drop=True)
TRAIN_TRN_C = ['SA' + str(x).zfill(2) for x in np.arange(TRN_N)]
TRAIN_TST_C = ['SB' + str(x).zfill(2) for x in np.arange(TST_N)]
TRAIN.columns = ['SHOP_ID'] + TRAIN_TRN_C + TRAIN_TST_C + ['TRN_STA','TRN_END','TST_STA','TST_END']
#%%
#TEST= pd.DataFrame()
#TRN_END = datetime.datetime.strptime('2016-10-31','%Y-%m-%d')
#TRN_STA = (TRN_END - datetime.timedelta(days=(TRN_N-1)) )
#TST_STA = (TRN_END + datetime.timedelta(days=(1)) )
#TST_END = (TRN_END + datetime.timedelta(days=(TST_N)) )
#test_date_zip = zip([str(TRN_STA.date())],[str(TRN_END.date())],[str(TST_STA.date())], [str(TST_END.date()) ])
#TEST = PAYNW_TAB.loc[:,str(TRN_STA.date()):str(TRN_END.date())]
#TEST.reset_index(level=0, inplace=True)
#end_date = datetime.datetime.strptime('2016-10-31','%Y-%m-%d')
#TEST.loc[:,'TRN_STA'] = str(TRN_STA.date())
#TEST.loc[:,'TRN_END'] = str(TRN_END.date())
#TEST.loc[:,'TST_STA'] = str(TST_STA.date())
#TEST.loc[:,'TST_END'] = str(TST_END.date())
#TEST_TRN_C = map(lambda x:'SA'+ str(x).zfill(2), np.arange(TRN_N))
#TEST.columns = ['SHOP_ID'] + TEST_TRN_C + ['TRN_STA','TRN_END','TST_STA','TST_END']
TEST = pd.read_csv('../dataclean/TEST_cor_0313.csv')
#%%
result_fix = pd.read_csv('../generateXY_table/sub_011_last6weeks_removenan_m105.csv', header=None, names=['SHOP_ID'] + list(range(14)))
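# Per-shop median of an earlier submission; merged below into TRAIN_OK and TEST as a reference value.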
result_fmed = pd.DataFrame()
result_fmed['SHOP_ID'] = result_fix['SHOP_ID']
result_fmed['VALUE'] = result_fix.loc[:,np.arange(0,14)].median(axis = 1)
result_fmed.to_csv('0215_fix.csv',index = False)
TRAIN_OK = TRAIN[TRAIN.loc[:,TRAIN_TST_C].isnull().sum(axis = 1)==0]
TRAIN_OK = TRAIN_OK[TRAIN_OK.loc[:,TRAIN_TRN_C].isnull().sum(axis = 1)<=(TRN_N-21)]
TRAIN_OK = pd.merge(TRAIN_OK ,result_fmed,on='SHOP_ID',how = 'left')
TEST = pd.merge(TEST ,result_fmed,on='SHOP_ID',how = 'left')
TRAIN_OK =
|
pd.merge(TRAIN_OK ,SHOP_INFO,on='SHOP_ID',how = 'left')
|
pandas.merge
|
from functools import partial
import json
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from solarforecastarbiter.io import utils
# data for test Dataframe
TEST_DICT = {'value': [2.0, 43.9, 338.0, -199.7, 0.32],
'quality_flag': [1, 1, 9, 5, 2]}
DF_INDEX = pd.date_range(start=pd.Timestamp('2019-01-24T00:00'),
freq='1min',
periods=5,
tz='UTC', name='timestamp')
DF_INDEX.freq = None
TEST_DATA = pd.DataFrame(TEST_DICT, index=DF_INDEX)
EMPTY_SERIES = pd.Series(dtype=float)
EMPTY_TIMESERIES = pd.Series([], name='value', index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
EMPTY_DATAFRAME = pd.DataFrame(dtype=float)
EMPTY_TIME_DATAFRAME = pd.DataFrame([], index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
TEST_DATAFRAME = pd.DataFrame({
'25.0': [0.0, 1, 2, 3, 4, 5],
'50.0': [1.0, 2, 3, 4, 5, 6],
'75.0': [2.0, 3, 4, 5, 6, 7]},
index=pd.date_range(start='20190101T0600',
end='20190101T1100',
freq='1h',
tz='America/Denver',
name='timestamp')).tz_convert('UTC')
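# TEST_DATAFRAME is analogous to a probabilistic forecast: columns named like percentile constant values,
# hourly index localized to America/Denver and then converted to UTC.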
@pytest.mark.parametrize('dump_quality,default_flag,flag_value', [
(False, None, 1),
(True, 2, 2)
])
def test_obs_df_to_json(dump_quality, default_flag, flag_value):
td = TEST_DATA.copy()
if dump_quality:
del td['quality_flag']
converted = utils.observation_df_to_json_payload(td, default_flag)
converted_dict = json.loads(converted)
assert 'values' in converted_dict
values = converted_dict['values']
assert len(values) == 5
assert values[0]['timestamp'] == '2019-01-24T00:00:00Z'
assert values[0]['quality_flag'] == flag_value
assert isinstance(values[0]['value'], float)
def test_obs_df_to_json_no_quality():
td = TEST_DATA.copy()
del td['quality_flag']
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_obs_df_to_json_no_values():
td = TEST_DATA.copy().rename(columns={'value': 'val1'})
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_forecast_series_to_json():
series = pd.Series([0, 1, 2, 3, 4], index=pd.date_range(
start='2019-01-01T12:00Z', freq='5min', periods=5))
expected = [{'value': 0.0, 'timestamp': '2019-01-01T12:00:00Z'},
{'value': 1.0, 'timestamp': '2019-01-01T12:05:00Z'},
{'value': 2.0, 'timestamp': '2019-01-01T12:10:00Z'},
{'value': 3.0, 'timestamp': '2019-01-01T12:15:00Z'},
{'value': 4.0, 'timestamp': '2019-01-01T12:20:00Z'}]
json_out = utils.forecast_object_to_json(series)
assert json.loads(json_out)['values'] == expected
def test_json_payload_to_observation_df(observation_values,
observation_values_text):
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_json_payload_to_forecast_series(forecast_values,
forecast_values_text):
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
def test_empty_payload_to_observation_df():
out = utils.json_payload_to_observation_df({'values': []})
assert set(out.columns) == {'value', 'quality_flag'}
assert isinstance(out.index, pd.DatetimeIndex)
def test_empty_payload_to_forecast_series():
out = utils.json_payload_to_forecast_series({'values': []})
assert isinstance(out.index, pd.DatetimeIndex)
def test_null_json_payload_to_observation_df():
observation_values_text = b"""
{
"_links": {
"metadata": ""
},
"observation_id": "OBSID",
"values": [
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
observation_values = pd.DataFrame({
'value': pd.Series([None, None], index=ind, dtype=float),
'quality_flag': pd.Series([1, 1], index=ind)
})
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_null_json_payload_to_forecast_series():
forecast_values_text = b"""
{
"_links": {
"metadata": ""
},
"forecast_id": "OBSID",
"values": [
{
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
forecast_values = pd.Series([None, None], index=ind, dtype=float,
name='value')
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
@pytest.mark.parametrize('label,exp,start,end', [
('instant', TEST_DATA, None, None),
(None, TEST_DATA, None, None),
('ending', TEST_DATA.iloc[1:], None, None),
('beginning', TEST_DATA.iloc[:-1], None, None),
pytest.param('er', TEST_DATA, None, None,
marks=pytest.mark.xfail(raises=ValueError)),
# start/end outside data
('ending', TEST_DATA, pd.Timestamp('20190123T2300Z'), None),
('beginning', TEST_DATA, None, pd.Timestamp('20190124T0100Z')),
# more limited
('ending', TEST_DATA.iloc[2:], pd.Timestamp('20190124T0001Z'), None),
('beginning', TEST_DATA.iloc[:-2], None,
pd.Timestamp('20190124T0003Z')),
('instant', TEST_DATA.iloc[1:-1], pd.Timestamp('20190124T0001Z'),
pd.Timestamp('20190124T0003Z')),
])
def test_adjust_timeseries_for_interval_label(label, exp, start, end):
start = start or pd.Timestamp('2019-01-24T00:00Z')
end = end or pd.Timestamp('2019-01-24T00:04Z')
out = utils.adjust_timeseries_for_interval_label(
TEST_DATA, label, start, end)
pdt.assert_frame_equal(exp, out)
def test_adjust_timeseries_for_interval_label_no_tz():
test_data = TEST_DATA.tz_localize(None)
label = None
start = pd.Timestamp('2019-01-24T00:00Z')
end = pd.Timestamp('2019-01-24T00:04Z')
with pytest.raises(ValueError):
utils.adjust_timeseries_for_interval_label(
test_data, label, start, end)
def test_adjust_timeseries_for_interval_label_no_tz_empty():
test_data = pd.DataFrame()
label = None
start = pd.Timestamp('2019-01-24T00:00Z')
end = pd.Timestamp('2019-01-24T00:04Z')
out = utils.adjust_timeseries_for_interval_label(
test_data, label, start, end)
pdt.assert_frame_equal(test_data, out)
@pytest.mark.parametrize('label,exp', [
('instant', TEST_DATA['value']),
('ending', TEST_DATA['value'].iloc[1:]),
('beginning', TEST_DATA['value'].iloc[:-1])
])
def test_adjust_timeseries_for_interval_label_series(label, exp):
start = pd.Timestamp('2019-01-24T00:00Z')
end = pd.Timestamp('2019-01-24T00:04Z')
out = utils.adjust_timeseries_for_interval_label(
TEST_DATA['value'], label, start, end)
pdt.assert_series_equal(exp, out)
@pytest.mark.parametrize('inp,exp', [
(TEST_DATA['value'], '{"schema":{"version": 1, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64", "objtype": "Series"},"data":[{"timestamp":"2019-01-24T00:00:00Z","value":2.0},{"timestamp":"2019-01-24T00:01:00Z","value":43.9},{"timestamp":"2019-01-24T00:02:00Z","value":338.0},{"timestamp":"2019-01-24T00:03:00Z","value":-199.7},{"timestamp":"2019-01-24T00:04:00Z","value":0.32}]}'), # NOQA: E501
(EMPTY_TIMESERIES, '{"schema":{"version": 1, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64", "objtype": "Series"},"data":[]}'), # NOQA: E501
pytest.param(EMPTY_SERIES, '', marks=[
pytest.mark.xfail(strict=True, type=TypeError)]),
pytest.param(pd.Series([], dtype=float, index=pd.DatetimeIndex([])), '',
marks=[pytest.mark.xfail(strict=True, type=TypeError)]),
(TEST_DATAFRAME, '{"schema":{"version": 1, "orient": "records", "timezone": "UTC", "column": ["25.0", "50.0", "75.0"], "index": "timestamp", "dtype": ["float64", "float64", "float64"], "objtype": "DataFrame"},"data":[{"timestamp":"2019-01-01T13:00:00Z","25.0":0.0,"50.0":1.0,"75.0":2.0},{"timestamp":"2019-01-01T14:00:00Z","25.0":1.0,"50.0":2.0,"75.0":3.0},{"timestamp":"2019-01-01T15:00:00Z","25.0":2.0,"50.0":3.0,"75.0":4.0},{"timestamp":"2019-01-01T16:00:00Z","25.0":3.0,"50.0":4.0,"75.0":5.0},{"timestamp":"2019-01-01T17:00:00Z","25.0":4.0,"50.0":5.0,"75.0":6.0},{"timestamp":"2019-01-01T18:00:00Z","25.0":5.0,"50.0":6.0,"75.0":7.0}]}'), # NOQA: E501
(EMPTY_TIME_DATAFRAME, '{"schema":{"version": 1, "orient": "records", "timezone": "UTC", "column": [], "index": "timestamp", "dtype": [], "objtype": "DataFrame"},"data":[]}'), # NOQA: E501
pytest.param(EMPTY_DATAFRAME, '', marks=[
pytest.mark.xfail(strict=True, type=TypeError)]),
])
def test_serialize_timeseries(inp, exp):
out = utils.serialize_timeseries(inp)
outd = json.loads(out)
assert 'schema' in outd
assert 'data' in outd
assert out == exp
@pytest.mark.parametrize('inp,exp', [
('{"schema": {"version": 0, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": []}', # NOQA
EMPTY_TIMESERIES),
('{"schema": {"version": 0, "orient": "records", "timezone": "US/Arizona", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": []}', # NOQA
pd.Series([], name='value', index=pd.DatetimeIndex(
[], tz='US/Arizona', name='timestamp'), dtype=float)),
('{"schema": {"version": 0, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": [], "other_stuff": {}}', # NOQA
EMPTY_TIMESERIES),
('{"schema": {"version": 0, "orient": "records", "timezone": "UTC", "column": "alue", "index": "timestamp", "dtype": "float64"}, "more": [], "data": []}', # NOQA
pd.Series([], name='alue', index=pd.DatetimeIndex(
[], tz='UTC', name='timestamp'), dtype=float)),
('{"schema": {"version": 0, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "more": [], "data": [], "other": []}', # NOQA
EMPTY_TIMESERIES),
('{"schema": {"version": 0, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": [{"timestamp": "2019-01-01T00:00:00Z", "value": 1.0}], "other_stuff": {}}', # NOQA
pd.Series([1.0], index=pd.DatetimeIndex(["2019-01-01T00:00:00"],
tz='UTC', name='timestamp'),
name='value')),
('{"schema": {"version": 0, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": [{"timestamp": "2019-01-01T00:00:00", "value": 1.0}], "other_stuff": {}}', # NOQA
pd.Series([1.0], index=pd.DatetimeIndex(["2019-01-01T00:00:00"],
tz='UTC', name='timestamp'),
name='value')),
('{"schema": {"version": 0, "orient": "records", "timezone": "Etc/GMT+8", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": [{"timestamp": "2019-01-01T00:00:00", "value": 1.0}], "other_stuff": {}}', # NOQA
pd.Series([1.0], index=pd.DatetimeIndex(["2019-01-01T00:00:00"],
tz='Etc/GMT+8', name='timestamp'),
name='value')),
pytest.param(
'{"data": [], "other_stuff": {}}',
EMPTY_SERIES,
marks=[pytest.mark.xfail(strict=True, type=ValueError)]),
pytest.param(
'{"schema": {"version": 0, "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "other_stuff": {}}', # NOQA
EMPTY_SERIES,
marks=[pytest.mark.xfail(strict=True, type=ValueError)]),
pytest.param(
'{"schema": {"version": 0, "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": []}', # NOQA
EMPTY_SERIES,
marks=[pytest.mark.xfail(strict=True, type=KeyError)]),
('{"schema": {"version": 1, "objtype": "Series", "orient": "records", "timezone": "UTC", "column": "value", "index": "timestamp", "dtype": "float64"}, "data": []}', # NOQA
EMPTY_TIMESERIES)
])
def test_deserialize_timeseries(inp, exp):
out = utils.deserialize_timeseries(inp)
pdt.assert_series_equal(out, exp)
@pytest.mark.parametrize('inp,exp', [
('{"schema":{"version": 1, "orient": "records", "timezone": "UTC", "column": [], "index": "timestamp", "dtype": [], "objtype": "DataFrame"},"data":[]}', # NOQA
EMPTY_TIME_DATAFRAME),
('{"schema": {"version": 1, "objtype": "DataFrame", "orient": "records", "timezone": "UTC", "column": [], "index": "timestamp", "dtype": ["float64"]}, "data": []}', # NOQA
EMPTY_TIME_DATAFRAME),
('{"schema": {"version": 1, "objtype": "DataFrame", "orient": "records", "timezone": "UTC", "column": ["25.0"], "index": "timestamp", "dtype": ["float64"]}, "data": [{"timestamp": "2019-01-01T00:00:00", "25.0": 1.0}], "other_stuff": {}}', # NOQA
pd.DataFrame({'25.0': 1.0}, index=pd.DatetimeIndex(
["2019-01-01T00:00:00"], tz='UTC', name='timestamp'))),
('{"schema": {"version": 1, "objtype": "DataFrame", "orient": "records", "timezone": "Etc/GMT+8", "column": ["25.0"], "index": "timestamp", "dtype": ["float64"]}, "data": [{"timestamp": "2019-01-01T00:00:00", "25.0": 1.0}], "other_stuff": {}}', # NOQA
pd.DataFrame({'25.0': 1.0}, index=pd.DatetimeIndex(
["2019-01-01T00:00:00"], tz='Etc/GMT+8', name='timestamp'))),
])
def test_deserialize_timeseries_frame(inp, exp):
out = utils.deserialize_timeseries(inp)
pdt.assert_frame_equal(out, exp)
def test_serialize_roundtrip():
ser = utils.serialize_timeseries(TEST_DATA['value'])
out = utils.deserialize_timeseries(ser)
pdt.assert_series_equal(out, TEST_DATA['value'])
# use the conftest.py dataframe for security against refactoring
def test_serialize_roundtrip_frame(prob_forecast_values):
ser = utils.serialize_timeseries(prob_forecast_values)
out = utils.deserialize_timeseries(ser)
|
pdt.assert_frame_equal(out, prob_forecast_values)
|
pandas.testing.assert_frame_equal
|
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
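# The tests below cover core HDFStore behavior: context management, repr, membership,
# versioning, walking the group hierarchy, removal, and table index creation.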
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables to set track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_table_mixed_dtypes(setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_calendar_roundtrip_issue(setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_remove(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
|
_maybe_remove(store, "b/foo")
|
pandas.tests.io.pytables.common._maybe_remove
|
# import numpy as np
import pandas as pd
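# Small convenience wrapper: coerce a Series to str, bool, or numeric dtype.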
class PandasUtil:
def __init__(self, datetime_format=None):
self.datetime_format = datetime_format
def fix_string(self, series):
return series.astype(str)
def fix_bool(self, series):
return series.astype(bool)
def fix_float(self, series):
return
|
pd.to_numeric(series)
|
pandas.to_numeric
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
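# 100 values in total (8 + 88 + 2 integers plus two NaN gaps); consumed by the dtype-parametrized fixtures below.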
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = integer_array([1, 2], dtype="int8")
assert result.dtype == Int8Dtype()
# if values has dtype -> override it
result = integer_array(np.array([1, 2], dtype="int8"), dtype="int32")
assert result.dtype == Int32Dtype()
def test_to_integer_array_float():
result = integer_array([1.0, 2.0])
expected = integer_array([1, 2])
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
integer_array([1.5, 2.0])
# for float dtypes, the itemsize is not preserved
result = integer_array(np.array([1.0, 2.0], dtype="float32"))
assert result.dtype == Int64Dtype()
@pytest.mark.parametrize(
"bool_values, int_values, target_dtype, expected_dtype",
[
([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
([False, True], [0, 1], "Int64", Int64Dtype()),
([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
],
)
def test_to_integer_array_bool(bool_values, int_values, target_dtype, expected_dtype):
result = integer_array(bool_values, dtype=target_dtype)
assert result.dtype == expected_dtype
expected = integer_array(int_values, dtype=target_dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values, to_dtype, result_dtype",
[
(np.array([1], dtype="int64"), None, Int64Dtype),
(np.array([1, np.nan]), None, Int64Dtype),
(np.array([1, np.nan]), "int8", Int8Dtype),
],
)
def test_to_integer_array(values, to_dtype, result_dtype):
# convert existing arrays to IntegerArrays
result = integer_array(values, dtype=to_dtype)
assert result.dtype == result_dtype()
expected = integer_array(values, dtype=result_dtype())
tm.assert_extension_array_equal(result, expected)
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
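# Worked example for the `(df.A + df.C) * 3 == 12` case above (illustration
# only, not part of the test): df.A + df.C is [2, 4, <NA>] (Int64), times 3
# gives [6, 12, <NA>], and the comparison with 12 therefore yields the
# boolean-dtype [False, True, <NA>], the missing value propagating through
# every step.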
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["mean"])
def test_reduce_to_float(op):
# some reduce ops always return float, even if the result
# is a rounded number
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, float)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
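# Minimal illustration of the behaviour exercised above (assuming the pandas
# behaviour described in the comment, not something asserted by this suite):
# even when the mean is a whole number, the reduction returns a plain float,
#   pd.Series([1, 3], dtype="Int64").mean()  # -> 2.0 (float, not an Int64 scalar)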
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a)
expected = integer_array(ufunc(a.astype(float)))
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(integer_array(ufunc(a.astype(float))))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = integer_array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = ufunc(a.astype(float))
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = ufunc(s.astype(float))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = integer_array(ufunc(a.astype(float), a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = integer_array(ufunc(a.astype(float), arr))
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = integer_array(ufunc(arr, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with scalar
result = ufunc(a, 1)
expected = integer_array(ufunc(a.astype(float), 1))
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = integer_array(ufunc(1, a.astype(float)))
tm.assert_extension_array_equal(result, expected)  # API: pandas._testing.assert_extension_array_equal
# -*- coding: utf-8 -*-
'''
Created on Jul 28 2019
Last large update on Jun 22 2020
@author: <NAME>
@supervisor: <NAME>
It is (will be) a Python 3.6 or higher program that performs massive search and classification of variable stars in VVV data.
'''
import os
import sys
import math
import numpy as np
import pandas as pd
from astropy.io import ascii
from astropy.table import Table
from astropy.stats import sigma_clip
from scipy.optimize import curve_fit
from scipy import optimize, signal
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
#from matplotlib import rc
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
import time
sys.path.append('/home/botan/OneDrive/Doutorado/VVV_DATA/my_modules/')
import clean_match_tables as cmt
import fit_sin_series as fitSin
import periodogram as pg
import variability_indicator as vi
import star_classificator_tools as sct
import status
class PeriodSearch(object):
def __init__(self, path, tile, minP, maxP, varIndex='chi2'):
'''
tile: tile name, e.g. b293; it depends on your folder architecture.
minP: minimum period (float)
maxP: maximum period (float)
varIndex: uncorrelated variable index: std or chi2
'''
self.tile = tile
self.varIndex = varIndex
self.minP = minP
self.maxP = maxP
self.path = f'{path}/{tile}'
os.makedirs(f'{self.path}/figures',exist_ok=True)
os.makedirs(f'{self.path}/output',exist_ok=True)
self.chips = [fn[:-3] for fn in sorted(os.listdir(f'{self.path}/chips/')) if fn.endswith('ts')]
self.tiles = sorted(os.listdir('/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/'))
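# Hypothetical usage sketch (the path, tile and period limits below are
# assumptions, not values taken from this repository):
#   ps = PeriodSearch(path='/data/vvv/psf_ts', tile='b293', minP=0.1, maxP=1000.0)
#   ps.organize_tables()
#   ps.select_candidates()
#   ps.do_periodogram()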
def organize_tables(self):
org = cmt.CreateTable(path=self.path,
tile=self.tile,
min_sample=25,
raw_files=True)
#org.plot_chips(show=False)
def select_candidates(self):
select = vi.Variability(path=self.path,
tile=self.tile,
method=self.varIndex,
maxMag=11.5,
stdRatio=1.5,
minChi2=2,
savePlot=True)
select.do_selection()
def _read_tables(self,chip):
self.data_table = pd.read_csv(f'{self.path}/chips/{chip}.ts',index_col=0,sep=',',low_memory=False)
self.obs_time = np.loadtxt(f'{self.path}/chips/{chip}.mjd')
self.mag_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'MAG']
self.err_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'ERR']
self.color_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'mag']
self.ks_mag = self.data_table[self.mag_cols]
self.ks_err = self.data_table[self.err_cols]
self.star_ids = self.data_table.index
if self.varIndex == 'chi2':
self.candidates = np.genfromtxt(f'{self.path}/var_data/{chip}.chi2cand', dtype=str)
if self.varIndex == 'std':
self.candidates = np.genfromtxt(f'{self.path}/var_data/{chip}.stdcand', dtype=str)
def _freq_agreement(self,f1,f2,h=4):
'''Check whether the PDM and LSG frequencies are in agreement, allowing harmonics up to order h (default 4).'''
n = 1
while n <= h:
m = 1
while m <= h:
if abs(f1/f2 - n/m) < 0.01:
bol = True
break
else:
bol = False
m+=1
if bol:
break
n+=1
return bol
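# Illustration of _freq_agreement (made-up values): with h=4, f1=4.0 and
# f2=2.0 agree because |f1/f2 - 2/1| < 0.01, whereas f1=4.0 and f2=1.3 match
# no n/m ratio with n, m <= 4 within the 1% tolerance, so the method returns False.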
def do_periodogram(self, exists=True):
j=1
for chip in self.chips:
if exists:
chips_done = [fn[:-19] for fn in os.listdir(f'{self.path}/var_data/') if fn.endswith('pdm_parameters.csv')]
else:
chips_done = []
if not chip in chips_done:
self._read_tables(chip)
lsg_pgram_params = []
pdm_pgram_params = []
i=1
for star in self.candidates:
status._print(prefix=f'Periodogram of chip {chip}',
iter1=j,
length1=len(self.chips),
iter2=i,
length2=len(self.candidates),
sufix='%')
lc = self.ks_mag.loc[star].values
err = self.ks_err.loc[star].values
t = self.obs_time
pgram = pg.Periodogram(t, lc, err, self.minP, self.maxP,
normalization='psd',
method='scargle',
samples_per_peak=10,
false=0.001,
nbins=10,
covers=3,
mode=False)
lsg_freq, lsg_power, lsg_false_alarm, lsg_best_freq, lsg_fap, lsg_sig_level, lsg_all_freq = pgram.LSG()
# Lomb-Scargle is much faster than PDM, so PDM runs only if LSG identifies a true frequency
if lsg_best_freq > 0:
pdm_freq, pdm_theta, pdm_best_freq, pdm_fap, pdm_sig_level, pdm_all_freq = pgram.CyPDM()
if pdm_best_freq > 0:
# comparison with the PDM period (within 1%, allowing harmonics up to n = 4):
if self._freq_agreement(f1=lsg_best_freq, f2=pdm_best_freq, h=4):
#coords:
ra = self.data_table.loc[star]['RA']
dec = self.data_table.loc[star]['DEC']
# J and Ks aper mag for color classification
j_mag = self.data_table.loc[star]['J']
j_err = self.data_table.loc[star]['JERR']
k_mag = self.data_table.loc[star]['K']
k_err = self.data_table.loc[star]['KERR']
EJK = self.data_table.loc[star]['EJK']
EJKERR = self.data_table.loc[star]['EJKERR']
# ZYJHKs psf mag for color classification
color_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
color_vals = [self.data_table.loc[star][col] for col in color_cols]
# amplitude from light curve (95 - 5 percentile)
amplitude = np.nanpercentile(lc,q=95) - np.nanpercentile(lc,q=5)
lsg_params = [star, chip, ra, dec] + color_vals + [j_mag, j_err,
k_mag, k_err, EJK, EJKERR, lsg_best_freq, amplitude,
lsg_fap, lsg_sig_level]
pdm_params = [star, chip, ra, dec] + color_vals + [j_mag, j_err,
k_mag, k_err, EJK, EJKERR, pdm_best_freq, amplitude,
pdm_fap, pdm_sig_level]
lsg_pgram_params.append(lsg_params)
pdm_pgram_params.append(pdm_params)
i+=1
# save periodogram data to files
colnames = ['ID','chip','RA','DEC'] + color_cols + ['APER_J',
'APER_JERR', 'APER_K','APER_KERR','APER_EJK',
'APER_EJKERR','best_freq','amplitude','fap','sig_level']
lsg_pgram_params = pd.DataFrame(lsg_pgram_params, columns=colnames)  # API: pandas.DataFrame
import requests
from glob import glob
from bs4 import BeautifulSoup
import pandas as pd
from datetime import datetime
from time import sleep
# http://www.networkinghowtos.com/howto/common-user-agent-list/
HEADERS = ({'User-Agent':
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5'})
def search_product_list(interval_count=1, interval_hours=6):
"""
This function loads a csv file named TRACKER_PRODUCTS.csv, with headers: [url, code, buy_below]
It looks for the file under ./trackers
It also requires a file called SEARCH_HISTORY.xlsx under the folder ./search_history to start saving the results.
An empty file can be used the first time the script runs.
Both the old and the new results are then saved in a new file named SEARCH_HISTORY_{datetime}.xlsx
This is the file the script will use to get the history next time it runs.
Parameters
----------
interval_count : TYPE, optional
DESCRIPTION. The default is 1. The number of times you want the script to run a search over the full product list.
interval_hours : TYPE, optional
DESCRIPTION. The default is 6.
Returns
-------
New .xlsx file with previous search history and results from current search
"""
prod_tracker = pd.read_csv('trackers/TRACKER_PRODUCTS.csv', sep=';')
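    # Hypothetical layout of trackers/TRACKER_PRODUCTS.csv, the file read just
    # above (made-up example row; only the headers url, code, buy_below come
    # from the docstring, and the separator is ';'):
    #   url;code;buy_below
    #   https://www.amazon.com/dp/B08XXXXXXX;B08XXXXXXX;99.99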
prod_tracker_URLS = prod_tracker.url
tracker_log = pd.DataFrame()  # API: pandas.DataFrame
#!/usr/bin/env python
# coding: utf-8
"""script that generates source data csvs for searchstims experiment figures"""
from argparse import ArgumentParser
from pathlib import Path
import pandas as pd
import pyprojroot
import searchnets
def main(results_gz_root,
source_data_root,
all_csv_filename,
err_effect_csv_filename,
net_names,
methods,
modes,
alexnet_split_csv_path,
VGG16_split_csv_path,
learning_rate=1e-3,
):
"""generate .csv files used as source data for figures corresponding to experiments
carried out with stimuli generated by searchstims library
Parameters
----------
results_gz_root : str, Path
path to root of directory that has results.gz files created by `searchnets test` command
source_data_root : str, path
path to root of directory where csv files
that are the source data for figures should be saved.
all_csv_filename : str
filename for .csv saved that contains results from **all** results.gz files.
Saved in source_data_root.
err_effect_csv_filename : str
filename for .csv should be saved that contains group analysis derived from all results,
with a measure of set size effect.
Saved in source_data_root.
net_names : list
of str, neural network architecture names
methods : list
of str, training "methods". Valid values are {"transfer", "initialize"}.
modes : list
of str, training "modes". Valid values are {"classify","detect"}.
alexnet_split_csv_path : str, Path
path to .csv that contains dataset splits for "alexnet-sized" searchstim images
VGG16_split_csv_path : str, Path
path to .csv that contains dataset splits for "VGG16-sized" searchstim images
learning_rate
float, learning rate value for all experiments. Default is 1e-3.
"""
results_gz_root = Path(results_gz_root)
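    # For reference, a hypothetical invocation of main() (every path and
    # filename below is an assumption, not taken from this project's config):
    #   main(results_gz_root='results/searchstims',
    #        source_data_root='source_data/searchstims',
    #        all_csv_filename='all.csv',
    #        err_effect_csv_filename='err_effect.csv',
    #        net_names=['alexnet', 'VGG16'],
    #        methods=['transfer', 'initialize'],
    #        modes=['classify', 'detect'],
    #        alexnet_split_csv_path='splits/alexnet.csv',
    #        VGG16_split_csv_path='splits/VGG16.csv')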
source_data_root = Path(source_data_root)
if not source_data_root.exists():
raise NotADirectoryError(
f'directory specified as source_data_root not found: {source_data_root}'
)
df_list = []
for net_name in net_names:
for method in methods:
if method not in METHODS:
raise ValueError(
f'invalid method: {method}, must be one of: {METHODS}'
)
for mode in modes:
results_gz_path = sorted(results_gz_root.glob(f'**/*{net_name}*{method}*gz'))
if mode == 'classify':
results_gz_path = [results_gz for results_gz in results_gz_path if 'detect' not in str(results_gz)]
elif mode == 'detect':
results_gz_path = [results_gz for results_gz in results_gz_path if 'detect' in str(results_gz)]
else:
raise ValueError(
f'invalid mode: {mode}, must be one of: {MODES}'
)
if len(results_gz_path) != 1:
raise ValueError(f'found more than one results.gz file: {results_gz_path}')
results_gz_path = results_gz_path[0]
if net_name == 'alexnet' or 'CORnet' in net_name:
csv_path = alexnet_split_csv_path
elif net_name == 'VGG16':
csv_path = VGG16_split_csv_path
else:
raise ValueError(f'no csv path defined for net_name: {net_name}')
df = searchnets.analysis.searchstims.results_gz_to_df(results_gz_path,
csv_path,
net_name,
method,
mode,
learning_rate)
df_list.append(df)
df_all = pd.concat(df_list)
df_all.to_csv(source_data_root.joinpath(all_csv_filename), index=False)
# For each "method" ('transfer' or 'initialize'),
# group by network, stimulus, and set size,
# and compute the mean accuracy for each set size.
for method in methods:
df_method = df_all[
(df_all['method'] == method) &
(df_all['target_condition'] == 'both')
]
# Make `DataFrame`
# where variable is difference of accuracies on set size 1 and set size 8.
records = []
df_method.groupby(['net_name', 'net_number', 'stimulus'])
for net_name in df_method['net_name'].unique():
df_net = df_method[df_method['net_name'] == net_name]
for net_number in df_net['net_number'].unique():
df_net_number = df_net[df_net['net_number'] == net_number]
for stimulus in df_net_number['stimulus'].unique():
df_stimulus = df_net_number[df_net_number['stimulus'] == stimulus]
set_size_1_acc = df_stimulus[df_stimulus['set_size'] == 1]['accuracy'].values.item() * 100
set_size_8_acc = df_stimulus[df_stimulus['set_size'] == 8]['accuracy'].values.item() * 100
set_size_1_err = (100. - set_size_1_acc)
set_size_effect = set_size_1_acc - set_size_8_acc
records.append(
{
'net_name': net_name,
'net_number': net_number,
'stimulus': stimulus,
'set_size_1_acc': set_size_1_acc,
'set_size_8_acc': set_size_8_acc,
'set_size_1_err': set_size_1_err,
'set_size_effect': set_size_effect,
'set_size_1_err_plus_effect': set_size_1_err + set_size_effect
}
)
df_acc_diff = pd.DataFrame.from_records(records)  # API: pandas.DataFrame.from_records
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 17:13:29 2018
@author: pamelaanderson
"""
from difflib import SequenceMatcher
import json
import numpy as np
import os
import operator
import pandas as pd
def load_adverse_events(path, year, q):
""" Loading adverse drug events while performing basic pre-processing"""
path_w_year = path + year + '/' + q + '/'
json_files = os.listdir(path_w_year)
df_adverse_ev = pd.DataFrame()
file_tot = [file for file in json_files if file not in ['.DS_Store']]
ind = 0
for file in file_tot:
print(file)
adverse_ev_data = json.load(open(path_w_year + file))
df_adverse_ev_json = pd.DataFrame(adverse_ev_data['results'])
df_adverse_ev = pd.concat([df_adverse_ev, df_adverse_ev_json])
del adverse_ev_data, df_adverse_ev_json
ind += 1
df_adverse_ev = df_adverse_ev.reset_index(drop=True)
# Change data types to correct format
df_adverse_ev = format_kept_cells(df_adverse_ev)
# Find drug application number from nested dictionary
df_adverse_ev = extract_drug_app_num_from_ad_ev(df_adverse_ev)
# Find patient features from nested dictionary
df_adverse_ev = extract_patient_features(df_adverse_ev)
# Find drug info from nested dictionary
df_adverse_ev = extract_drug_features(df_adverse_ev)
# Find who submitted report info as column in df
df_adverse_ev = extract_source_info(df_adverse_ev)
# Drop columns that will not be included as features
df_adverse_ev = drop_unneeded_cols(df_adverse_ev)
return df_adverse_ev
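# Hypothetical usage sketch (the path, year and quarter strings are
# assumptions; the expected directory layout is <path>/<year>/<q>/ containing
# the openFDA JSON files):
#   df_2018_q1 = load_adverse_events('/data/faers/', '2018', 'Q1')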
def drop_unneeded_cols(df_adverse_ev):
""" Drop the columns that will not be used as features """
drop_cols = ['companynumb','duplicate', 'occurcountry',
'patient',
'primarysourcecountry', 'receiptdateformat',
'receiver', 'receivedate', 'receivedateformat', 'reportduplicate',
'reporttype','safetyreportid',
'safetyreportversion', 'sender',
'transmissiondate','transmissiondateformat']
df_adverse_ev = df_adverse_ev.drop(drop_cols, axis=1)
return df_adverse_ev
def format_kept_cells(df_adverse_ev):
""" Correct data types (to numeric or datetime) """
df_adverse_ev['fulfillexpeditecriteria'] = pd.to_numeric(df_adverse_ev['fulfillexpeditecriteria'])
df_adverse_ev['serious'] = pd.to_numeric(df_adverse_ev['serious'])
df_adverse_ev['seriousnesscongenitalanomali'] = pd.to_numeric(df_adverse_ev['seriousnesscongenitalanomali'])
df_adverse_ev['seriousnessdeath'] = pd.to_numeric(df_adverse_ev['seriousnessdeath'])
df_adverse_ev['seriousnessdisabling'] = pd.to_numeric(df_adverse_ev['seriousnessdisabling'])
df_adverse_ev['seriousnesshospitalization'] = pd.to_numeric(df_adverse_ev['seriousnesshospitalization'])  # API: pandas.to_numeric
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = __author__
import pytest
import numpy as np
import pandas as pd
from pylife.core.broadcaster import Broadcaster
foo_bar_series = pd.Series({'foo': 1.0, 'bar': 2.0})
foo_bar_series_twice_in_frame = pd.DataFrame([foo_bar_series, foo_bar_series])
series_named_index = foo_bar_series.copy()
series_named_index.index.name = 'idx1'
foo_bar_frame = pd.DataFrame({'foo': [1.0, 1.5], 'bar': [2.0, 1.5]})
def test_broadcast_series_to_array():
param, obj = Broadcaster(foo_bar_series).broadcast([1.0, 2.0])
pd.testing.assert_series_equal(param, pd.Series([1.0, 2.0]))
pd.testing.assert_frame_equal(foo_bar_series_twice_in_frame, obj)
def test_broadcast_frame_to_array_match():
param, obj = Broadcaster(foo_bar_frame).broadcast([1.0, 2.0])
np.testing.assert_array_equal(param, [1.0, 2.0])
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_frame_to_array_mismatch():
with pytest.raises(ValueError, match=r"Dimension mismatch. "
"Cannot map 3 value array-like to a 2 element DataFrame signal."):
Broadcaster(foo_bar_frame).broadcast([1.0, 2.0, 3.0])
def test_broadcast_series_to_scalar():
param, obj = Broadcaster(foo_bar_series).broadcast(1.0)
assert param == 1.0
pd.testing.assert_series_equal(foo_bar_series, obj)
def test_broadcast_frame_to_scalar():
param, obj = Broadcaster(foo_bar_frame).broadcast(1.0)
expected_param = pd.Series([1.0, 1.0], index=foo_bar_frame.index)
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_series_index_named_to_series_index_named():
series = pd.Series([5.0, 6.0], index=pd.Index(['x', 'y'], name='idx2'))
param, obj = Broadcaster(series_named_index).broadcast(series)
expected_param = pd.Series({
('foo', 'x'): 5.0,
('foo', 'y'): 6.0,
('bar', 'x'): 5.0,
('bar', 'y'): 6.0
})
expected_obj = pd.Series({
('foo', 'x'): 1.0,
('foo', 'y'): 1.0,
('bar', 'x'): 2.0,
('bar', 'y'): 2.0
})
expected_obj.index.names = ['idx1', 'idx2']
expected_param.index.names = ['idx1', 'idx2']
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_series_equal(expected_obj, obj)
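# The test above illustrates broadcasting across two named indices: the result
# carries the Cartesian product of 'idx1' and 'idx2', with the parameter
# values (5.0, 6.0) repeated for every 'idx1' entry and the signal values
# (1.0, 2.0) repeated for every 'idx2' entry.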
def test_broadcast_series_index_named_to_series_index_none():
series = pd.Series([5.0, 6.0], index=pd.Index([3, 4]))  # API: pandas.Index
#!/usr/bin/env python
# coding: utf-8
# # Import Base Packages
# In[ ]:
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# # Interface function to feature engineering data
# In[ ]:
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
column_names = [ 'age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income' ]
columns_to_encoding = [ 'workclass', 'marital-status', 'occupation', 'relationship', 'race', 'gender' ]
columns_to_normalize = [ 'age', 'educational-num', 'hours-per-week', 'capital-gain', 'capital-loss' ]
le = LabelEncoder()
scaler = StandardScaler()
pl = PolynomialFeatures(2, include_bias=False)
def feature_engineering(filename, train=True):
df = pd.read_csv(filename, index_col=False)
df.drop(['fnlwgt', 'education', 'native-country'], axis=1, inplace=True)
df = pd.get_dummies(df, columns=columns_to_encoding)
df["income"] = le.fit_transform(df['income'])
if train:
X_temp = pl.fit_transform(df[columns_to_normalize])
X_temp = scaler.fit_transform(X_temp)
df.drop(columns_to_normalize, axis=1, inplace=True)
X_train = np.hstack((df.values, X_temp))
y_train = df['income']
columns_names = pl.get_feature_names(df.columns)
return np.hstack((df.columns.values, columns_names)), X_train, y_train
else:
X_temp = pl.transform(df[columns_to_normalize])
X_temp = scaler.transform(X_temp)
df.drop(columns_to_normalize, axis=1, inplace=True)
X_test = np.hstack((df.values, X_temp))
y_test = df['income']
columns_names = pl.get_feature_names(df.columns)
return np.hstack((df.columns.values, columns_names)), X_test, y_test
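# Note on the design above: the module-level StandardScaler and
# PolynomialFeatures objects are fitted only when train=True and merely
# re-used with transform() when train=False, so the training file must be
# processed before any test file. Hypothetical usage sketch (file names are
# assumptions):
#   cols, X_tr, y_tr = feature_engineering('adult_train.csv', train=True)
#   cols, X_te, y_te = feature_engineering('adult_test.csv', train=False)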
# # Load Data
# In[ ]:
columns_names, X, y = feature_engineering("../../../input/wenruliu_adult-income-dataset/adult.csv", train=True)
# In[ ]:
from sklearn.model_selection import train_test_split
def rmnan(X, y):
X_, y_ = [], []
for x, yt in zip(X, y):
if np.isnan(x).any() or np.isnan(yt).any():
continue
X_.append(x)
y_.append(yt)
return np.array(X_), np.array(y_)
X, y = rmnan(X, y)
# In[ ]:
X, X_test, y, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
y.shape, y_test.shape
# # Find Best number of components to PCA
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RepeatedStratifiedKFold
param_distribution = { 'max_depth': np.arange(1, 15), }
scoring = { 'Accuracy': make_scorer(accuracy_score), 'F1_Score': make_scorer(fbeta_score, beta=1, average='micro'), }
# In[ ]:
result = []
kf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2)
for fold, (train_index, test_index) in enumerate(kf.split(X, y)):
X_tr, X_tst = X[train_index], X[test_index]
y_tr, y_tst = y[train_index], y[test_index]
for i in range(1, 20):
# train
pca = PCA(i)
X_t = pca.fit_transform(X_tr)
search_cv = RandomizedSearchCV(DecisionTreeClassifier(), param_distribution, scoring=scoring, n_jobs=-1, cv=RepeatedStratifiedKFold(n_splits=2, n_repeats=2), refit='F1_Score')
search_cv.fit(X_t, y_tr)
model = search_cv.best_estimator_
# test
X_t = pca.transform(X_tst)
y_pred = model.predict(X_t)
# model evaluation
f1 = fbeta_score(y_tst, y_pred, beta=1)
acc = accuracy_score(y_tst, y_pred)
print(f"fold: {fold} - cp:{i} train: {search_cv.best_score_} test: f1={f1}, acc={acc}")
result.append((fold, i, acc, f1, pca, model))
# In[ ]:
best_f1 = 0
best_model = None
for fold, n, acc, f1, pca, model in result:
if best_f1 < f1:
best_f1 = f1
best_model=(fold, n, acc, f1, pca, model)
pca_components = best_model[1]
pca_components
# # Get best model with best pca_components number
# In[ ]:
result, metrics_ = [], []
kf = RepeatedStratifiedKFold(n_splits=10, n_repeats=1)
for fold, (train_index, test_index) in enumerate(kf.split(X, y)):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# train
pca = PCA(pca_components)
X_t = pca.fit_transform(X_train)
search_cv = RandomizedSearchCV(DecisionTreeClassifier(), param_distribution, scoring=scoring, n_jobs=-1, cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=1), refit='F1_Score')
search_cv.fit(X_t, y_train)
model = search_cv.best_estimator_
# test
X_t = pca.transform(X_test)
y_pred = model.predict(X_t)
# model evaluation
f1 = fbeta_score(y_test, y_pred, beta=1)
acc = accuracy_score(y_test, y_pred)
print(f"fold: {fold} - cp:{pca_components} train: {search_cv.best_score_} test: f1={f1}, acc={acc}")
result.append((X_train, y_train, X_test, y_test, fold, pca_components, acc, f1, pca, model))  # use pca_components rather than the stale loop variable i
metrics_.append((f1, acc))
# In[ ]:
best_f1 = 0
best_model = None
for X_train, y_train, X_test, y_test, fold, n, acc, f1, pca, model in result:
if best_f1 < f1:
best_f1 = f1
best_model=(X_train, y_train, X_test, y_test, fold, n, acc, f1, pca, model)
X_train, y_train, X_test, y_test = X, y, X_test, y_test #best_model[:4]
# # Analyse Model Result
# In[ ]:
from sklearn import metrics
pca, model = best_model[-2], best_model[-1]
probs = model.predict_proba(pca.transform(X_test))
preds = probs[:,1]
fpr, tpr, threshold = metrics.roc_curve(y_test, preds)
roc_auc = metrics.auc(fpr, tpr)
# method I: plt
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print()
# In[ ]:
f1_r, acc_r = [], []
for f1, acc in metrics_:
f1_r.append(f1)
acc_r.append(acc)
f1_r, acc_r = np.array(f1_r), np.array(acc_r)
l = f1_r.shape[0]
plt.title(f'F1 Score in Folds(PCA components = {pca_components})')
plt.plot(range(l), f1_r, 'r', label = 'F1 Score')
plt.plot(range(l), acc_r, 'b', label = 'Accuracy')
plt.legend(loc = 'lower right')
plt.xticks(range(l))
plt.xlim([0, l - 1])
plt.ylim([0.95, 1])
plt.ylabel('F1 Score')
plt.xlabel('Fold')
plt.grid()
print()
# ## Plot feature importances
# In[ ]:
def plot_feature_importances(clf, X_train, y_train=None, top_n=10, figsize=(8,8), print_table=False, title="Feature Importances"):
# https://www.kaggle.com/grfiv4/plotting-feature-importances
__name__ = "plot_feature_importances"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
X_train = pd.DataFrame(data=X_train, columns=[f"PC{i}" for i in range(1, X_train.shape[1] + 1)])
feat_imp = pd.DataFrame({'importance': clf.feature_importances_})  # API: pandas.DataFrame
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# cudf.DataFrame.to_pandas() upcasts numerical columns to float and
# casts nan to 0 in non-float numerical columns, so normalise them here before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df =
|
pd.concat([df1, df2], axis=1)
|
pandas.concat
|
import base64
import json
from datetime import datetime
from typing import Tuple
import matplotlib as mpl
import matplotlib.pyplot as plt
import mplcyberpunk
import numpy as np
import pandas as pd
import pushover
import pytz
import requests
from config import (dbConfig, electricalSupplier, pushNotifications)
from db import db
from logger import create_logger
logger = create_logger('octopus_tariff_app')
def get_tariff(productCode: str) -> pd.DataFrame:
try:
url = f"{electricalSupplier['API_URL']}/products/{productCode}/electricity-tariffs/E-1R-{productCode}-L/standard-unit-rates/"
agileProduct = requests.get(url)
except requests.urllib3.exceptions.MaxRetryError:
logger.error(f'API attempt failed: {url}')
return
tariff = pd.DataFrame.from_records(
json.loads(agileProduct.text)['results'])
# default times are UTC
tariff['valid_from'] = pd.to_datetime(
tariff['valid_from']).dt.tz_convert('Europe/London')
tariff['valid_to'] = pd.to_datetime(
tariff['valid_to']).dt.tz_convert('Europe/London')
return tariff
def get_usage():
return get_usage_base(electricalSupplier["MPAN"])
def get_export():
return get_usage_base(electricalSupplier["MPAN_export"])
def get_usage_base(MPAN) -> pd.DataFrame:
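# build an HTTP Basic Authorization header from the supplier API key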
token = base64.b64encode(electricalSupplier['key'].encode()).decode()
response = requests.get(
f'{electricalSupplier["API_URL"]}/electricity-meter-points/{MPAN}/meters/{electricalSupplier["serialNo"]}/consumption/', headers={"Authorization": f'Basic {token}'})
usage = pd.DataFrame.from_records(json.loads(response.text)['results'])
# default times are UTC
usage['interval_start'] = pd.to_datetime(
usage['interval_start']).dt.tz_convert('Europe/London')
usage['interval_end'] = pd.to_datetime(
usage['interval_end']).dt.tz_convert('Europe/London')
return usage
def get_cheapest_period(n: int = 1):
tariff = get_tariff(electricalSupplier['productRef'])
if tariff is None:
return
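# keep only tariff periods that have not yet ended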
tariff = tariff[tariff['valid_to'] >
datetime.now(pytz.timezone('Europe/London'))]
return tariff.sort_values('value_inc_vat', ascending=True).head(n)
def create_actions(df: pd.DataFrame, start: str = 'From', end: str = 'To', start_action: str = 'on', end_action: str = 'off') -> pd.DataFrame:
"""Transform table with From and To fields to actions
Args:
df (pd.DataFrame): Dataframe with schema which includes start and end time fields for actions
start (str, optional): name of start time field. Defaults to 'From'.
end (str, optional): name of end time field. Defaults to 'To'.
start_action (str, optional): Action at start. None results in no action set. Defaults to 'on'.
end_action (str, optional): As start_action. Defaults to 'off'.
Returns:
pd.DataFrame: long-format table of the start/end timestamps with their associated actions
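Example:
A minimal sketch (column names and values are illustrative only):
df = pd.DataFrame({"From": ["08:00"], "To": ["08:30"]})
pd.melt(df)  # one row per (variable, value) pair, later mapped to the start/end actions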
"""
# Discard unneeded fields
df = df[[start, end]]
action =
|
pd.melt(df)
|
pandas.melt
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
from pathlib import Path
import warnings
import pandas as pd
warnings.filterwarnings('ignore')
DATA_DIR = Path('..', '..', 'data')
idx = pd.IndexSlice
def create_split_table():
with pd.HDFStore('stooq.h5') as store:
store.put('jp/splits', pd.DataFrame(columns=['sid', 'effective_date', 'ratio'],
data=[[1, pd.to_datetime('2010-01-01'), 1.0]]), format='t')
def load_prices():
df =
|
pd.read_hdf(DATA_DIR / 'assets.h5', 'stooq/jp/tse/stocks/prices')
|
pandas.read_hdf
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import time
import random
import datetime
from datetime import date
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from google.cloud import bigquery
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="data/ironia-data-dfc6335a0abe.json"
client = bigquery.Client()
def generate_df(ids,working_dates,period=1,risk=0.05):
pd.options.mode.chained_assignment = None
main_df =
|
pd.DataFrame(columns=["Ironia_id","Name","MDD","DaR","CDaR","RF","VaR", "CVaR", "MAD"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import datetime as dt
import pandas as pd
import pytest
from snl_d3d_cec_verify.report import (_Line,
_WrappedLine,
_Paragraph,
_WrappedParagraph,
_MetaLine,
Content,
Report)
@pytest.fixture
def text():
return "a" * 24 + " " + "a" * 24
def test_Line(text):
test = _Line(text, 25)
result = test()
lines = result.split("\n")
assert len(lines) == 1
assert len(lines[0]) == 49
def test_WrappedLine(text):
test = _WrappedLine(text, 25)
result = test()
lines = result.split("\n")
assert len(lines) == 2
assert len(lines[0]) == 24
def test_WrappedLine_width_none(text):
test = _WrappedLine(text)
result = test()
lines = result.split("\n")
assert len(lines) == 1
assert len(lines[0]) == 49
def test_Paragraph(text):
test = _Paragraph(text, 25)
result = test()
lines = result.split("\n")
assert len(lines) == 2
assert len(lines[0]) == 49
def test_WrappedParagraph(text):
test = _WrappedParagraph(text, 25)
result = test()
lines = result.split("\n")
assert len(lines) == 3
assert len(lines[0]) == 24
def test_WrappedParagraph_width_none(text):
test = _WrappedParagraph(text)
result = test()
lines = result.split("\n")
assert len(lines) == 2
assert len(lines[0]) == 49
@pytest.fixture
def metaline():
return _MetaLine()
def test_MetaLine_defined_false(metaline):
assert not metaline.defined
def test_MetaLine_defined_true(metaline, text):
metaline.add_line(text)
assert metaline.defined
def test_MetaLine_add_line(metaline, text):
metaline.add_line(text)
assert metaline.defined
assert isinstance(metaline.line, _Line)
metaline.add_line(None)
assert not metaline.defined
def test_MetaLine_call_empty(metaline):
assert metaline() == "%"
def test_MetaLine_call_text(metaline, text):
metaline.add_line(text)
assert metaline() == "% " + text
@pytest.fixture
def content():
return Content()
def test_content_add_text(content, text):
content.add_text(text)
assert len(content._body) == 1
assert content._body[0][0] == text
assert content._body[0][1] is _WrappedParagraph
def test_content_add_text_no_wrap(content, text):
content.add_text(text, wrapped=False)
assert len(content._body) == 1
assert content._body[0][0] == text
assert content._body[0][1] is _Paragraph
def test_content_add_heading(content):
title = "Test"
level = 2
content.add_heading(title, level=level)
assert len(content._body) == 1
assert content._body[0][1] is _Paragraph
text = content._body[0][0]
assert text == '#' * level + ' ' + title
def test_content_add_heading_label_error(content):
title = "Test"
level = 2
label = "mock"
with pytest.raises(ValueError) as excinfo:
content.add_heading(title, level=level, label=label)
assert "must start with 'sec:'" in str(excinfo)
assert len(content._body) == 0
def test_content_add_heading_label(content):
title = "Test"
level = 2
label = "sec:mock"
content.add_heading(title, level=level, label=label)
assert len(content._body) == 1
assert content._body[0][1] is _Paragraph
text = content._body[0][0]
assert text == '#' * level + ' ' + title + ' ' + f'{{#{label}}}'
def test_content_add_table(content):
data = {"a": [1, 2, 3],
"b": [4, 5, 6]}
df = pd.DataFrame(data)
caption = "test"
content.add_table(df, caption=caption)
assert len(content._body) == 2
assert content._body[0][1] is _Paragraph
assert content._body[1][1] is _Paragraph
table_text = content._body[0][0]
assert table_text == df.to_markdown(index=True)
caption_text = content._body[1][0]
assert caption_text == "Table: " + caption
def test_content_add_table_label_error(content):
df = "mock"
caption = "test"
label = "mock"
with pytest.raises(ValueError) as excinfo:
content.add_table(df, caption=caption, label=label)
assert "must start with 'tbl:'" in str(excinfo)
assert len(content._body) == 0
def test_content_add_table_label_no_caption(content):
data = {"a": [1, 2, 3],
"b": [4, 5, 6]}
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import pandas as pd
import math
import astropy as ast
import numpy as np
from astropy.time import Time
import matplotlib.pylab as plt
from astropy import units as u
from astropy.io import fits
import warnings
from lstchain.reco.utils import get_effective_time,add_delta_t_key
from lstchain.io.io import dl2_params_lstcam_key,dl2_params_src_dep_lstcam_key, get_srcdep_params
import os
class ReadFermiFile():
def __init__(self, file):
if 'fits' not in file:
raise ValueError('No FITS file provided for Fermi-LAT data')
else:
self.fname=file
def read_file(self):
f=fits.open(self.fname)
fits_table=f[1].data
return(fits_table)
def create_df_from_info(self,fits_table):
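# FITS table columns are big-endian; byteswap()/newbyteorder() converts them to native byte order for numpy/pandas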
time=fits_table['BARYCENTRIC_TIME'].byteswap().newbyteorder()
phases=fits_table['PULSE_PHASE'].byteswap().newbyteorder()
energies=fits_table['ENERGY'].byteswap().newbyteorder()
dataframe = pd.DataFrame({"mjd_time":time,"pulsar_phase":phases,"dragon_time":time*3600*24,"energy":energies/1000})
dataframe=dataframe.sort_values(by=['mjd_time'])
self.info=dataframe
return(self.info)
def calculate_tobs(self):
diff=np.array(self.info['mjd_time'].to_list()[1:])-np.array(self.info['mjd_time'].to_list()[0:-1])
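# gaps longer than 5 hours (5/24 of a day in MJD) are treated as breaks between runs and excluded from the observation time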
diff[diff>5/24]=0
return(sum(diff)*24)
def run(self):
print(' Reading Fermi-LAT data file')
ftable=self.read_file()
self.create_df_from_info(ftable)
self.tobs=self.calculate_tobs()
print(' Finishing reading. Total time is '+str(self.tobs)+' h'+'\n')
class ReadLSTFile():
def __init__(self, file=None, directory=None,src_dependent=False):
if file==None and directory==None:
raise ValueError('No file provided')
elif file is not None and directory is not None:
raise ValueError('Can only provide file or directory, but not both')
elif file is not None:
if 'h5' not in file:
raise ValueError('No hdf5 file provided for LST data')
else:
self.fname=file
elif directory is not None:
self.direc=directory
self.fname=[]
for x in os.listdir(self.direc):
rel_dir = os.path.relpath(self.direc)
rel_file = os.path.join(rel_dir, x)
if 'h5' in rel_file:
self.fname.append(rel_file)
self.fname.sort()
self.info=None
self.src_dependent=src_dependent
def read_LSTfile(self,fname,df_type='short'):
if self.src_dependent==False:
df=pd.read_hdf(fname,key=dl2_params_lstcam_key)
elif self.src_dependent==True:
srcindep_df=pd.read_hdf(fname,key=dl2_params_lstcam_key,float_precision=20)
on_df_srcdep=get_srcdep_params(fname,'on')
if 'reco_energy' in srcindep_df.keys():
srcindep_df = srcindep_df.drop(['reco_energy'], axis=1)
if 'gammaness' in srcindep_df.keys():
srcindep_df = srcindep_df.drop(['gammaness'], axis=1)
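# merge the source-independent parameters with the source-dependent ('on') parameters column-wise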
df = pd.concat([srcindep_df, on_df_srcdep], axis=1)
if df_type=='short':
if 'alpha' in df and 'theta2' in df:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","alpha","theta2","alt_tel"]]
elif 'alpha' in df and 'theta2' not in df:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","alpha","alt_tel"]]
elif 'theta2' in df and 'alpha' not in df:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","theta2","alt_tel"]]
else:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","alt_tel"]]
try:
df_filtered['energy']=df['reco_energy']
except:
df_filtered['energy']=df['energy']
else:
df_filtered = df
df_filtered['energy']=df['reco_energy']
df_filtered=add_delta_t_key(df_filtered)
return(df_filtered)
def calculate_tobs(self):
dataframe=add_delta_t_key(self.info)
return(get_effective_time(dataframe)[1].value/3600)
def run(self,pulsarana,df_type='long'):
print(' Reading LST-1 data file')
if isinstance(self.fname,list):
info_list=[]
for name in self.fname:
try:
info_file=self.read_LSTfile(name,df_type)
self.info=info_file
self.tobs=self.calculate_tobs()
pulsarana.cuts.apply_fixed_cut(self)
if pulsarana.cuts.energy_binning_cut is not None:
pulsarana.cuts.apply_energydep_cuts(self)
info_list.append(self.info)
except:
raise ValueError('Failing when reading:'+ str(name))
self.info=pd.concat(info_list)
self.tobs=self.calculate_tobs()
else:
self.info=self.read_LSTfile(self.fname,df_type)
self.tobs=self.calculate_tobs()
print(' Finishing reading. Total time is '+str(self.tobs)+' h')
pulsarana.cuts.apply_fixed_cut(self)
if pulsarana.cuts.energy_binning_cut is not None:
pulsarana.cuts.apply_energydep_cuts(self)
print(' Finishing filtering events:')
print(' gammaness cut:'+str(pulsarana.cuts.gammaness_cut))
print(' alpha cut:'+str(pulsarana.cuts.alpha_cut))
print(' theta2 cut:'+str(pulsarana.cuts.theta2_cut))
print(' zd cut:'+str(pulsarana.cuts.zd_cut))
print(' energy binning for the cuts:'+str(pulsarana.cuts.energy_binning_cut))
print('\n')
class ReadtxtFile():
def __init__(self, file,format_txt):
self.fname=file
self.format=format_txt
def read_file(self):
data = pd.read_csv(self.fname, sep=" ", header=None)
return(data)
def check_format(self):
for name in ['t','p']:
if name not in self.format:
raise ValueError(' No valid format')
def create_df_from_info(self,df):
for i in range(0,len(self.format)):
if self.format[i]=='t':
times=df.iloc[:, i]
elif self.format[i]=='e':
energies=df.iloc[:, i]
elif self.format[i]=='p':
phases=df.iloc[:, i]
elif self.format[i]=='g':
gammaness=df.iloc[:, i]
elif self.format[i]=='a':
alphas=df.iloc[:, i]
elif self.format[i]=='t2':
theta2=df.iloc[:, i]
elif self.format[i]=='at':
alt_tel=df.iloc[:, i]
dataframe =
|
pd.DataFrame({"mjd_time":times,"pulsar_phase":phases,"dragon_time":times*3600*24,"energy":energies})
|
pandas.DataFrame
|
import logging
import os
import re
import time
import zipfile
import paramiko
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy.exc import IntegrityError
from dataactcore.config import CONFIG_BROKER
from dataactcore.models.domainModels import DUNS
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import clean_data, insert_dataframe
from dataactbroker.helpers.uri_helper import RetrieveFileFromUri
logger = logging.getLogger(__name__)
REMOTE_SAM_DUNS_DIR = '/current/SAM/2_FOUO/UTF-8/'
REMOTE_SAM_EXEC_COMP_DIR = '/current/SAM/6_EXECCOMP/UTF-8'
BUSINESS_TYPES_SEPARATOR = '~'
def get_client(ssh_key=None):
""" Connects to the SAM client and returns a usable object for interaction
Arguments:
ssh_key: private ssh key to connect to the secure API
Returns:
client object to interact with the SAM service
"""
sam_config = CONFIG_BROKER.get('sam_duns')
if not sam_config:
return None
host = sam_config.get('host') if not ssh_key else sam_config.get('host_ssh')
port = sam_config.get('port')
username = sam_config.get('username')
password = sam_config.get('password')
pkey = None
if ssh_key:
with RetrieveFileFromUri(ssh_key, binary_data=False).get_file_object() as key_obj:
pkey = paramiko.RSAKey.from_private_key(key_obj, password=password)
if None in (host, port, username, password):
raise Exception("Missing config elements for connecting to SAM")
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
hostname=host,
port=port,
username=username,
password=password,
pkey=pkey
)
return client
def get_relevant_models(data, sess, benchmarks=False, table=DUNS):
""" Get a list of the duns we're gonna work off of to prevent multiple calls to the database
Args:
data: dataframe representing the original list of duns we have available
sess: the database connection
benchmarks: whether or not to log times
table: the table to work from (could be DUNS/HistoricParentDuns)
Returns:
A dict of all matching models and a dict of the models that have already been activated
"""
if benchmarks:
get_models = time.time()
logger.info("Getting relevant models")
duns_found = [duns.strip().zfill(9) for duns in list(data["awardee_or_recipient_uniqu"].unique())]
dun_objects_found = sess.query(table).filter(table.awardee_or_recipient_uniqu.in_(duns_found))
models = {duns.awardee_or_recipient_uniqu: duns for duns in dun_objects_found}
logger.info("Getting models with activation dates already set")
activated_models = {duns_num: duns for duns_num, duns in models.items() if duns.activation_date is not None}
if benchmarks:
logger.info("Getting models took {} seconds".format(time.time() - get_models))
return models, activated_models
def load_duns_by_row(data, sess, models, activated_models, benchmarks=False, table=DUNS):
""" Updates the DUNS in the database that match to the models provided
Args:
data: dataframe representing the original list of duns we have available
sess: the database connection
models: the DUNS objects representing the updated data
activated_models: the DUNS objects that have been activated
benchmarks: whether or not to log times
table: the table to work from (could be DUNS/HistoricParentDuns)
Returns:
tuple of added and updated duns lists
"""
# Disabling activation_check as we're using registration_date
# data = activation_check(data, activated_models, benchmarks).where(pd.notnull(data), None)
added, updated = update_duns(models, data, benchmarks=benchmarks, table=table)
sess.add_all(models.values())
return added, updated
# Removed this function when adding registration_date
# def activation_check(data, activated_models, benchmarks=False):
# # if activation_date's already set, keep it, otherwise update it (default)
# logger.info("going through activation check")
# if benchmarks:
# activation_check_start = time.time()
# lambda_func = (lambda duns_num: pd.Series([activated_models[duns_num].activation_date
# if duns_num in activated_models else np.nan]))
# data = data.assign(old_activation_date=data["awardee_or_recipient_uniqu"].apply(lambda_func))
# data.loc[pd.notnull(data["old_activation_date"]), "activation_date"] = data["old_activation_date"]
# del data["old_activation_date"]
# if benchmarks:
# logger.info("Activation check took {} seconds".format(time.time()-activation_check_start))
# return data
def update_duns(models, new_data, benchmarks=False, table=DUNS):
""" Modify existing models or create new ones
Args:
models: the DUNS objects representing the updated data
new_data: the new data to update
benchmarks: whether or not to log times
table: the table to work from (could be DUNS/HistoricParentDuns)
Returns:
tuple of added and updated duns lists
"""
added = []
updated = []
logger.info("Updating duns")
if benchmarks:
update_duns_start = time.time()
for _, row in new_data.iterrows():
awardee_or_recipient_uniqu = row['awardee_or_recipient_uniqu']
if awardee_or_recipient_uniqu not in models:
models[awardee_or_recipient_uniqu] = table()
added.append(awardee_or_recipient_uniqu)
else:
updated.append(awardee_or_recipient_uniqu)
for field, value in row.items():
if value:
setattr(models[awardee_or_recipient_uniqu], field, value)
if benchmarks:
logger.info("Updating duns took {} seconds".format(time.time() - update_duns_start))
return added, updated
def clean_sam_data(data, table=DUNS):
""" Wrapper around clean_data with the DUNS context
Args:
data: the dataframe to be cleaned
table: the table to work from (could be DUNS/HistoricParentDuns)
Returns:
a cleaned/updated dataframe to be imported
"""
return clean_data(data, table, {
"awardee_or_recipient_uniqu": "awardee_or_recipient_uniqu",
"activation_date": "activation_date",
"deactivation_date": "deactivation_date",
"registration_date": "registration_date",
"expiration_date": "expiration_date",
"last_sam_mod_date": "last_sam_mod_date",
"sam_extract_code": "sam_extract_code",
"legal_business_name": "legal_business_name",
"dba_name": "dba_name",
"address_line_1": "address_line_1",
"address_line_2": "address_line_2",
"city": "city",
"state": "state",
"zip": "zip",
"zip4": "zip4",
"country_code": "country_code",
"congressional_district": "congressional_district",
"entity_structure": "entity_structure",
"business_types_codes": "business_types_codes",
"ultimate_parent_legal_enti": "ultimate_parent_legal_enti",
"ultimate_parent_unique_ide": "ultimate_parent_unique_ide"
}, {})
def parse_duns_file(file_path, sess, monthly=False, benchmarks=False, table=DUNS, year=None, metrics=None):
""" Takes in a DUNS file and adds the DUNS data to the database
Args:
file_path: the path to the SAM file
sess: the database connection
monthly: whether it's a monthly file
benchmarks: whether to log times
table: the table to work from (could be DUNS/HistoricParentDuns)
year: the year associated with the data (primarily for HistoricParentDUNS loads)
metrics: dictionary representing metrics data for the load
"""
if not metrics:
metrics = {
'files_processed': [],
'records_received': 0,
'adds_received': 0,
'updates_received': 0,
'deletes_received': 0,
'records_ignored': 0,
'added_duns': [],
'updated_duns': []
}
parse_start_time = time.time()
logger.info("Starting file " + str(file_path))
dat_file_name = os.path.splitext(os.path.basename(file_path))[0]+'.dat'
sam_file_type = "MONTHLY" if monthly else "DAILY"
dat_file_date = re.findall(".*{}_(.*).dat".format(sam_file_type), dat_file_name)[0]
with create_app().app_context():
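# 0-based column positions of the fields we keep from the pipe-delimited SAM .dat extract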
column_header_mapping = {
"awardee_or_recipient_uniqu": 0,
"sam_extract_code": 4,
"registration_date": 6,
"expiration_date": 7,
"last_sam_mod_date": 8,
"activation_date": 9,
"legal_business_name": 10,
"dba_name": 11,
"address_line_1": 14,
"address_line_2": 15,
"city": 16,
"state": 17,
"zip": 18,
"zip4": 19,
"country_code": 20,
"congressional_district": 21,
"entity_structure": 27,
"business_types_raw": 31,
"ultimate_parent_legal_enti": 186,
"ultimate_parent_unique_ide": 187
}
column_header_mapping_ordered = OrderedDict(sorted(column_header_mapping.items(), key=lambda c: c[1]))
# Initial sweep of the file to count rows and possibly see which DUNS we're updating
if benchmarks:
initial_sweep = time.time()
nrows = 0
with zipfile.ZipFile(file_path) as zip_file:
with zip_file.open(dat_file_name) as dat_file:
nrows = len(dat_file.readlines())
if benchmarks:
logger.info("Initial sweep took {} seconds".format(time.time() - initial_sweep))
block_size = 10000
batches = (nrows-1)//block_size
# skip the first line again if the last batch is also the first batch
skiplastrows = 2 if batches == 0 else 1
last_block_size = ((nrows % block_size) or block_size)-skiplastrows
batch = 0
rows_received = 0
adds_received = 0
updates_received = 0
deletes_received = 0
records_ignored = 0
added_duns = []
updated_duns = []
while batch <= batches:
skiprows = 1 if batch == 0 else (batch*block_size)
nrows = (((batch+1)*block_size)-skiprows) if (batch < batches) else last_block_size
logger.info('Loading rows %s to %s', skiprows+1, nrows+skiprows)
with zipfile.ZipFile(file_path) as zip_file:
with zip_file.open(dat_file_name) as dat_file:
csv_data = pd.read_csv(dat_file, dtype=str, header=None, skiprows=skiprows, nrows=nrows, sep='|',
usecols=column_header_mapping_ordered.values(),
names=column_header_mapping_ordered.keys(), quoting=3)
# add deactivation_date column for delete records
lambda_func = (lambda sam_extract: pd.Series([dat_file_date if sam_extract == "1" else np.nan]))
csv_data = csv_data.assign(deactivation_date=pd.Series([np.nan], name='deactivation_date')
if monthly else csv_data["sam_extract_code"].apply(lambda_func))
# convert business types string to array
bt_func = (lambda bt_raw: pd.Series([[str(code) for code in str(bt_raw).split('~')
if isinstance(bt_raw, str)]]))
csv_data = csv_data.assign(business_types_codes=csv_data["business_types_raw"].apply(bt_func))
del csv_data["business_types_raw"]
# removing rows where DUNS number isn't even provided
csv_data = csv_data.where(csv_data["awardee_or_recipient_uniqu"].notnull())
# clean the data and replace NaN/NaT with None
csv_data = clean_sam_data(csv_data.where(
|
pd.notnull(csv_data)
|
pandas.notnull
|
import ntpath
import os
import logging
import argparse
from datetime import datetime
from pypgrest import Postgrest
import pandas as pd
import boto3
from dotenv import load_dotenv
import utils
# Environment variables
AWS_ACCESS_ID = os.getenv("AWS_ACCESS_ID")
AWS_PASS = os.getenv("AWS_PASS")
BUCKET_NAME = os.getenv("BUCKET_NAME")
POSTGREST_TOKEN = os.getenv("POSTGREST_TOKEN")
POSTGREST_ENDPOINT = os.getenv("POSTGREST_ENDPOINT")
def handle_year_month_args(year, month, lastmonth, aws_s3_client, user):
"""
Parameters
----------
year : Int
Argument provided value for year.
month : Int
Argument provided value for month.
lastmonth : Bool
Argument that determines if the previous month should also be queried.
aws_s3_client : boto3 client object
For sending on to get_csv_list.
user : String
Selects which S3 transactions subdirectory to read (e.g. "pard" for the PARD folder).
Returns
-------
csv_file_list : List
A list of the CSV files to be downloaded and upserted to Postgres.
"""
# If args are missing, default to current month and/or year
if not year:
f_year = datetime.now().year
else:
f_year = year
if not month:
f_month = datetime.now().month
else:
f_month = month
csv_file_list = get_csv_list(f_year, f_month, aws_s3_client, user)
if not month and not year:
if lastmonth == True:
prev_month = f_month - 1
prev_year = f_year
if prev_month == 0:
prev_year = prev_year - 1
prev_month = 12
logger.debug(
f"Getting data from folders: {prev_month}-{prev_year} and {f_month}-{f_year}"
)
prev_list = get_csv_list(prev_year, prev_month, aws_s3_client, user)
csv_file_list.extend(prev_list)
else:
logger.debug(f"Getting data from folders: {f_month}-{f_year}")
csv_file_list = [f for f in csv_file_list if f.endswith(".csv")]
return csv_file_list
def get_file_name(file_key):
"""
Returns the base file name from the full S3 file path
:param file_key: the file path
:return: string
"""
return ntpath.basename(file_key)
def get_csv_list(year, month, client, user):
"""
Returns the pending CSV files collected into an actual list (as opposed to a generator object)
:return: array of strings
"""
csv_file_list = []
pending_csv_list = aws_list_files(year, month, client, user)
for csv_file in pending_csv_list:
csv_file_list.append(csv_file)
# Finally return the final list
return csv_file_list
def aws_list_files(year, month, client, user):
"""
Yields the S3 object keys of the meter transaction CSV files for the given year and month.
:return: generator of strings
"""
subdir = "archipel_transactionspub"
if user == "pard":
subdir = f"{subdir}-PARD"
response = client.list_objects(
Bucket=BUCKET_NAME, Prefix=f"meters/prod/{subdir}/{str(year)}/{str(month)}",
)
for content in response.get("Contents", []):
yield content.get("Key")
def get_invoice_id(banking_id, terminal_code):
"""Create the Inovice ID which is a concatention of the banking ID and device ID
Args:
banking_id (int): whatever this is
terminal_code (int): whatever this is
Returns:
int: The formatted invoice ID
"""
if
|
pd.isna(banking_id)
|
pandas.isna
|
# flake8: noqa: F841
import tempfile
from pathlib import Path
from typing import List
from pandas._typing import Scalar, ArrayLike
import pandas as pd
import numpy as np
from pandas.core.window import ExponentialMovingWindow
def test_types_init() -> None:
pd.Series(1)
pd.Series((1, 2, 3))
pd.Series(np.array([1, 2, 3]))
pd.Series(data=[1, 2, 3, 4], name="series")
pd.Series(data=[1, 2, 3, 4], dtype=np.int8)
pd.Series(data={'row1': [1, 2], 'row2': [3, 4]})
pd.Series(data=[1, 2, 3, 4], index=[4, 3, 2, 1], copy=True)
def test_types_any() -> None:
res1: bool = pd.Series([False, False]).any()
res2: bool = pd.Series([False, False]).any(bool_only=False)
res3: bool = pd.Series([np.nan]).any(skipna=False)
def test_types_all() -> None:
res1: bool = pd.Series([False, False]).all()
res2: bool = pd.Series([False, False]).all(bool_only=False)
res3: bool = pd.Series([np.nan]).all(skipna=False)
def test_types_csv() -> None:
s = pd.Series(data=[1, 2, 3])
csv_df: str = s.to_csv()
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name)
s2: pd.DataFrame = pd.read_csv(file.name)
with tempfile.NamedTemporaryFile() as file:
s.to_csv(Path(file.name))
s3: pd.DataFrame = pd.read_csv(Path(file.name))
# This keyword was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name, errors='replace')
s4: pd.DataFrame = pd.read_csv(file.name)
def test_types_copy() -> None:
s = pd.Series(data=[1, 2, 3, 4])
s2: pd.Series = s.copy()
def test_types_select() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s[0]
s[1:]
def test_types_iloc_iat() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.iat[0]
s2.loc[0]
s2.iat[0]
def test_types_loc_at() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.at['row1']
s2.loc[1]
s2.at[1]
def test_types_boolean_indexing() -> None:
s = pd.Series([0, 1, 2])
s[s > 1]
s[s]
def test_types_df_to_df_comparison() -> None:
s = pd.Series(data={'col1': [1, 2]})
s2 = pd.Series(data={'col1': [3, 2]})
res_gt: pd.Series = s > s2
res_ge: pd.Series = s >= s2
res_lt: pd.Series = s < s2
res_le: pd.Series = s <= s2
res_e: pd.Series = s == s2
def test_types_head_tail() -> None:
s = pd.Series([0, 1, 2])
s.head(1)
s.tail(1)
def test_types_sample() -> None:
s = pd.Series([0, 1, 2])
s.sample(frac=0.5)
s.sample(n=1)
def test_types_nlargest_nsmallest() -> None:
s = pd.Series([0, 1, 2])
s.nlargest(1)
s.nlargest(1, 'first')
s.nsmallest(1, 'last')
s.nsmallest(1, 'all')
def test_types_filter() -> None:
s = pd.Series(data=[1, 2, 3, 4], index=['cow', 'coal', 'coalesce', ''])
s.filter(items=['cow'])
s.filter(regex='co.*')
s.filter(like='al')
def test_types_setting() -> None:
s = pd.Series([0, 1, 2])
s[3] = 4
s[s == 1] = 5
s[:] = 3
def test_types_drop() -> None:
s = pd.Series([0, 1, 2])
res: pd.Series = s.drop(0)
res2: pd.Series = s.drop([0, 1])
res3: pd.Series = s.drop(0, axis=0)
res4: None = s.drop([0, 1], inplace=True, errors='raise')
res5: None = s.drop([0, 1], inplace=True, errors='ignore')
def test_types_drop_multilevel() -> None:
index = pd.MultiIndex(levels=[['top', 'bottom'], ['first', 'second', 'third']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
s = pd.Series(data=[1, 2, 3, 4, 5, 6], index=index)
res: pd.Series = s.drop(labels='first', level=1)
def test_types_dropna() -> None:
s = pd.Series([1, np.nan, np.nan])
res: pd.Series = s.dropna()
res2: None = s.dropna(axis=0, inplace=True)
def test_types_fillna() -> None:
s = pd.Series([1, np.nan, np.nan, 3])
res: pd.Series = s.fillna(0)
res2: pd.Series = s.fillna(0, axis='index')
res3: pd.Series = s.fillna(method='backfill', axis=0)
res4: None = s.fillna(method='bfill', inplace=True)
res5: pd.Series = s.fillna(method='pad')
res6: pd.Series = s.fillna(method='ffill', limit=1)
def test_types_sort_index() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_index()
res2: None = s.sort_index(ascending=False, inplace=True)
res3: pd.Series = s.sort_index(kind="mergesort")
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_index_with_key() -> None:
s = pd.Series([1, 2, 3], index=['a', 'B', 'c'])
res: pd.Series = s.sort_index(key=lambda k: k.str.lower())
def test_types_sort_values() -> None:
s = pd.Series([4, 2, 1, 3])
res: pd.Series = s.sort_values(0)
res2: pd.Series = s.sort_values(ascending=False)
res3: None = s.sort_values(inplace=True, kind='quicksort')
res4: pd.Series = s.sort_values(na_position='last')
res5: pd.Series = s.sort_values(ignore_index=True)
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_values_with_key() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_values(key=lambda k: -k)
def test_types_shift() -> None:
s = pd.Series([1, 2, 3])
s.shift()
s.shift(axis=0, periods=1)
s.shift(-1, fill_value=0)
def test_types_rank() -> None:
s = pd.Series([1, 1, 2, 5, 6, np.nan, 'milion'])
s.rank()
s.rank(axis=0, na_option='bottom')
s.rank(method="min", pct=True)
s.rank(method="dense", ascending=True)
s.rank(method="first", numeric_only=True)
def test_types_mean() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.mean()
s1: pd.Series = s.mean(axis=0, level=0)
f2: float = s.mean(skipna=False)
f3: float = s.mean(numeric_only=False)
def test_types_median() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.median()
s1: pd.Series = s.median(axis=0, level=0)
f2: float = s.median(skipna=False)
f3: float = s.median(numeric_only=False)
def test_types_sum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.sum()
s.sum(axis=0, level=0)
s.sum(skipna=False)
s.sum(numeric_only=False)
s.sum(min_count=4)
def test_types_cumsum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.cumsum()
s.cumsum(axis=0)
s.cumsum(skipna=False)
def test_types_min() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.min()
s.min(axis=0)
s.min(level=0)
s.min(skipna=False)
def test_types_max() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.max()
s.max(axis=0)
s.max(level=0)
s.max(skipna=False)
def test_types_quantile() -> None:
s = pd.Series([1, 2, 3, 10])
s.quantile([0.25, 0.5])
s.quantile(0.75)
s.quantile()
s.quantile(interpolation='nearest')
def test_types_clip() -> None:
s = pd.Series([-10, 2, 3, 10])
s.clip(lower=0, upper=5)
s.clip(lower=0, upper=5, inplace=True)
def test_types_abs() -> None:
s = pd.Series([-10, 2, 3, 10])
s.abs()
def test_types_var() -> None:
s = pd.Series([-10, 2, 3, 10])
s.var()
s.var(axis=0, ddof=1)
s.var(skipna=True, numeric_only=False)
def test_types_std() -> None:
s = pd.Series([-10, 2, 3, 10])
s.std()
s.std(axis=0, ddof=1)
s.std(skipna=True, numeric_only=False)
def test_types_idxmin() -> None:
s = pd.Series([-10, 2, 3, 10])
s.idxmin()
s.idxmin(axis=0)
def test_types_idxmax() -> None:
s = pd.Series([-10, 2, 3, 10])
s.idxmax()
s.idxmax(axis=0)
def test_types_value_counts() -> None:
s = pd.Series([1, 2])
s.value_counts()
def test_types_unique() -> None:
s = pd.Series([-10, 2, 2, 3, 10, 10])
s.unique()
def test_types_apply() -> None:
s = pd.Series([-10, 2, 2, 3, 10, 10])
s.apply(lambda x: x ** 2)
s.apply(np.exp)
s.apply(str)
def test_types_element_wise_arithmetic() -> None:
s = pd.Series([0, 1, -10])
s2 = pd.Series([7, -5, 10])
res_add1: pd.Series = s + s2
res_add2: pd.Series = s.add(s2, fill_value=0)
res_sub: pd.Series = s - s2
res_sub2: pd.Series = s.sub(s2, fill_value=0)
res_mul: pd.Series = s * s2
res_mul2: pd.Series = s.mul(s2, fill_value=0)
res_div: pd.Series = s / s2
res_div2: pd.Series = s.div(s2, fill_value=0)
res_floordiv: pd.Series = s // s2
res_floordiv2: pd.Series = s.floordiv(s2, fill_value=0)
res_mod: pd.Series = s % s2
res_mod2: pd.Series = s.mod(s2, fill_value=0)
res_pow: pd.Series = s ** abs(s2)
res_pow2: pd.Series = s.pow(abs(s2), fill_value=0)
def test_types_scalar_arithmetic() -> None:
s =
|
pd.Series([0,1,-10])
|
pandas.Series
|
import torch
import torch.nn as nn
import data_loader
import network1 as network
import pandas as pd
import matplotlib.pyplot as plt
from vgg import vgg16, VGGNormLayer, perceptual_loss
from argparse import ArgumentParser
torch.backends.cudnn.benchmark = True
torch.autograd.set_grad_enabled(False)
def save_graph(path, val_losses, counter):
'''
Plots and saves the average perceptual loss as a function of the number of patches
'''
plt.figure(figsize=(16, 8))
plt.plot(counter, val_losses,
color='blue')
plt.scatter(counter, val_losses, color = 'blue')
plt.suptitle('Network1 Perceptual Loss vs Number of 32x32 Patches')
plt.legend(['Val Loss'], loc='upper right')
plt.xlabel('Number of Patches')
plt.ylabel('Perceptual loss')
plt.savefig(path)
def patch_find_sing(net, patch_size, data, target, device, batch_size):
'''
With a greedy approach, determine where to place patches such that the network can best reconstruct an image
Return the centers of the patches and the associated loss
'''
points = [set() for _ in range(batch_size)]
stored_points = [[] for _ in range(batch_size)]
stored_losses = [[] for _ in range(batch_size)]
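# points[i] holds the patch centres already chosen for image i; stored_points/stored_losses record the greedy selection history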
x_crop = patch_size[0] // 2
y_crop = patch_size[1] // 2
# calculate 25 best locations for each image
for _ in range(25):
best_loss = [float('inf') for _ in range(batch_size)]
best_tup = [(0, 0) for _ in range(batch_size)]
for x in range(patch_size[0]//2, data.shape[2], patch_size[0]//2):
for y in range(patch_size[1]//2, data.shape[3], patch_size[1]//2):
old_mask = data[:, :, x - x_crop:x + x_crop, y - y_crop:y + y_crop].clone()
new_mask = torch.cat((target[:, :, x - x_crop:x + x_crop, y - y_crop:y + y_crop],
torch.ones((batch_size, 1) + patch_size).to(device)), dim=1)
data[:, :, x - x_crop:x + x_crop, y - y_crop:y + y_crop] = new_mask
loss = net(data, target) #bx1
for i in range(batch_size):
if (x, y) in points[i]:
continue
if loss[i] < best_loss[i]:
best_loss[i] = loss[i].item()
best_tup[i] = (x, y)
data[:, :, x - x_crop:x + x_crop, y - y_crop:y + y_crop] = old_mask
for i in range(batch_size):
x, y = best_tup[i]
new_mask = torch.cat(
(target[i:i + 1, :, x - x_crop:x + x_crop, y - y_crop:y + y_crop],
torch.ones((1, 1) + patch_size).to(device)),
dim=1)
data[i:i + 1, :, x - x_crop:x + x_crop, y - y_crop:y + y_crop] = new_mask
for i in range(batch_size):
stored_points[i].append(best_tup[i])
stored_losses[i].append(best_loss[i])
points[i].add(best_tup[i])
return stored_points, stored_losses
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--gpus", type=int, nargs='+', default=list(range(torch.cuda.device_count())), help="gpus")
parser.add_argument("--psize", type=int, default=32, help="patch size")
parser.add_argument("--bvsize", type=int, default=32, help="batch size val")
parser.add_argument("--dataset", type=str, default='celeba', help="name of dataset")
parser.add_argument("--datapath", type=str, help="path to data set")
parser.add_argument("--modelpath", type=str, help="path to pre-trained weights")
opt = parser.parse_args()
device = torch.device("cuda:{}".format(opt.gpus[0]) if torch.cuda.is_available() else "cpu")
device_ids, patch_size, batch_size = opt.gpus, (opt.psize, opt.psize), opt.bvsize
# initialize trained network
cnet = network.Net()
cnet.load_state_dict(torch.load(opt.modelpath, map_location=device)['state_dict'])
loss_net = network.FullModel(network.Net(), perceptual_loss, vgg16(), VGGNormLayer())
if torch.cuda.device_count() > 1:
loss_net = nn.DataParallel(loss_net, device_ids=device_ids)
loss_net.to(device)
# data loader
if opt.dataset == 'celeba':
loader = data_loader.get_celeba_loader
elif opt.dataset == 'FFHQ':
loader = data_loader.get_ffhq_loader
else:
loader = data_loader.get_stl_loader
val_loader = loader(opt.datapath, patch_size, batch_size, "val")
dic = {}
results = []
num_patches_loss = [0 for _ in range(25)]
# this loop finds the optimal patch locations for every image
for ind, (data, target) in enumerate(val_loader):
data, target = data.to(device, non_blocking=True), target.to(device, non_blocking=True)
stored_points, stored_losses = patch_find_sing(loss_net, patch_size, data, target, device, batch_size)
for i in range(batch_size):
results.append(stored_points[i])
results.append(stored_losses[i])
num_patches_loss = [i + j for i, j in zip(num_patches_loss, stored_losses[i])]
dic[1+batch_size * ind + i] = (stored_points[i], stored_losses[i])
print('Loss {}\n'.format(stored_losses[-1][-1]))
torch.save(dic, "results/network1/patch_losses60.pickle")
|
pd.DataFrame(results)
|
pandas.DataFrame
|
"""
Base IO for all periodic datasets
"""
import os
import pandas as pd
from ._base import get_data_home
def fetch_health_app(data_home=None, filename="health_app.csv"):
"""Fetch and return the health app log dataset
from github.com/logpai/loghub
HealthApp is a mobile application for Android devices.
Logs were collected from an Android smartphone after 10+ days of use.
This dataset only represents the different types of logs, hence resulting
in only 20 different events.
==============================  ===================================
Number of events                20
Average delta per event         Timedelta('0 days 00:53:24.984000')
Average nb of points per event  100.0
==============================  ===================================
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-mine data is stored in `scikit-mine_data`.
Returns
-------
pd.Series
Transactions from the health app dataset, as an in-memory pandas Series.
Each unique transaction is represented as a Python list.
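Examples
--------
A minimal usage sketch (the import path below is an assumption)::

    from skmine.datasets import fetch_health_app
    logs = fetch_health_app()
    logs.head()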
"""
data_home = data_home or get_data_home()
p = os.path.join(data_home, filename)
kwargs = dict(header=None, index_col=0, squeeze=True, dtype="string")
if filename in os.listdir(data_home):
s = pd.read_csv(p, index_col=0, squeeze=True)
else:
s = pd.read_csv(
"https://raw.githubusercontent.com/logpai/loghub/master/HealthApp/HealthApp_2k.log",
sep="|",
error_bad_lines=False,
usecols=[0, 1],
**kwargs
)
s.to_csv(p)
s.index.name = "timestamp"
s.index =
|
pd.to_datetime(s.index, format="%Y%m%d-%H:%M:%S:%f")
|
pandas.to_datetime
|
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseGetitemTests(BaseExtensionTests):
"""Tests for ExtensionArray.__getitem__."""
def test_iloc_series(self, data):
ser = pd.Series(data)
result = ser.iloc[:4]
expected =
|
pd.Series(data[:4])
|
pandas.Series
|
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out of max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out of vertical bounds cannot result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
        # same platform-dependent exponent formatting as above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
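    # Series of one-char strings, two-char strings, and ascending/descending
    # string lengths used by the formatting tests below.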
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import (
datetime,
timedelta,
)
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
def chck_ncols(self, s):
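        # Every non-truncated line of the repr should render with the same stripped width.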
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split("\n")
lines = [
line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
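            # Length of the first run of dots (the truncation marker) in the flattened repr.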
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = Series(range(20))
# default setting no truncation even if above min_rows
assert ".." not in repr(s)
s = Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(s)
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype="int64")
s.name = "myser"
res = s.to_string(max_rows=2, name=True)
exp = "0 0\n ..\n99 99\nName: myser"
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, dtype=True)
exp = "0 0\n ..\n99 99\ndtype: int64"
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, length=True)
exp = "0 0\n ..\n99 99\nLength: 100"
assert res == exp
def test_to_string_na_rep(self):
s = Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
s = Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
s = Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = "0 0\n ..\n9 9"
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def test_to_string_empty_col(self):
# GH 13653
s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
res = s.to_string(index=False)
exp = " \n Hello\n World\n \n \nMooooo\n \n "
assert re.match(exp, res)
class TestGenericArrayFormatter:
def test_1d_array(self):
# GenericArrayFormatter is used on types for which there isn't a dedicated
# formatter. np.bool_ is one of those types.
obj = fmt.GenericArrayFormatter(np.array([True, False]))
res = obj.get_result()
assert len(res) == 2
# Results should be right-justified.
assert res[0] == " True"
assert res[1] == " False"
def test_2d_array(self):
obj = fmt.GenericArrayFormatter(np.array([[True, False], [False, True]]))
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [True, False]"
assert res[1] == " [False, True]"
def test_3d_array(self):
obj = fmt.GenericArrayFormatter(
np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
)
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [[True, True], [False, False]]"
assert res[1] == " [[False, True], [True, False]]"
def test_2d_extension_type(self):
# GH 33770
# Define a stub extension type with just enough code to run Series.__repr__()
class DtypeStub(pd.api.extensions.ExtensionDtype):
@property
def type(self):
return np.ndarray
@property
def name(self):
return "DtypeStub"
class ExtTypeStub(pd.api.extensions.ExtensionArray):
def __len__(self):
return 2
def __getitem__(self, ix):
return [ix == 1, ix == 0]
@property
def dtype(self):
return DtypeStub()
series = Series(ExtTypeStub())
res = repr(series) # This line crashed before #33770 was fixed.
expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub"
assert res == expected
def _three_digit_exp():
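    # Detect builds whose float formatting emits three-digit exponents (e.g. "1.7e+008"),
    # so expected repr strings can adapt.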
return f"{1.7e8:.4g}" == "1.7e+008"
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_display_precision_trailing_zeroes(self):
# Issue #20359: trimming zeros while there is no decimal point
# Happens when display precision is set to zero
with option_context("display.precision", 0):
s = Series([840.0, 4200.0])
expected_output = "0 840\n1 4200\ndtype: float64"
assert str(s) == expected_output
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with option_context("display.precision", 6):
# DataFrame example from issue #9764
d = DataFrame(
{
"col1": [
9.999e-8,
1e-7,
1.0001e-7,
2e-7,
4.999e-7,
5e-7,
5.0001e-7,
6e-7,
9.999e-7,
1e-6,
1.0001e-6,
2e-6,
4.999e-6,
5e-6,
5.0001e-6,
6e-6,
]
}
)
expected_output = {
(0, 6): " col1\n"
"0 9.999000e-08\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 6): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 8): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07\n"
"6 5.000100e-07\n"
"7 6.000000e-07",
(8, 16): " col1\n"
"8 9.999000e-07\n"
"9 1.000000e-06\n"
"10 1.000100e-06\n"
"11 2.000000e-06\n"
"12 4.999000e-06\n"
"13 5.000000e-06\n"
"14 5.000100e-06\n"
"15 6.000000e-06",
(9, 16): " col1\n"
"9 0.000001\n"
"10 0.000001\n"
"11 0.000002\n"
"12 0.000005\n"
"13 0.000005\n"
"14 0.000005\n"
"15 0.000006",
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with option_context("display.precision", 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = DataFrame({"x": [12345.6789]})
assert str(df) == " x\n0 12345.6789"
df = DataFrame({"x": [2e6]})
assert str(df) == " x\n0 2000000.0"
df = DataFrame({"x": [12345.6789, 2e6]})
assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
class TestRepr_timedelta64:
def test_none(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base()
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="sub_day")
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d =
|
pd.to_timedelta(0, unit="D")
|
pandas.to_timedelta
|
# Author: <NAME>
# Feel free to use, copy, distribute or modify the Python Script, licensed under MIT License.
# Please ensure to provide proper credit for the same.
import streamlit as st
import pandas as pd
from csv import DictWriter
from datetime import datetime
import states_data
st.header('India Fights Covid-19')
st.write("Let's save our families and friends together!")
st.write("")
st.info("Click the TOP LEFT BAR / PANE to view the options.")
states = states_data.states_data()
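# Helper used by the forms below: renders linked State -> District -> City selectboxes driven by
# the states_data dictionary and returns the three selections, e.g. state, district, city = state_data(key="provider_pickup").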
def state_data(key):
states_list = list(states.keys())
state_selection = st.selectbox('States & Union Territories', options=states_list, key=key)
district_lists = list(states[state_selection].keys())
district_selection = st.selectbox('District', options=district_lists, key=key)
cities = st.selectbox('Cities', options=list(states[state_selection][district_selection]), key=key)
return state_selection, district_selection, cities
# 1. STATES IMPORTANT LINKS
st.write("---")
st.sidebar.subheader("Links & Helpline Number")
states_imp_links = {
"": "",
"National Links": {
"Links": {
"Cipla Med Access": "https://www.cipla.com/",
"Dr. Reddy's COVID-19 Med Access": "https://readytofightcovid.in/",
"Pan India Plasma Resources": "https://covidplasma.online/",
"COVID-19 Resources Citizen's Compiled Data- 1": "https://docs.google.com/spreadsheets/d/1mrlaZg8jvduKcxvCWs"
"-phAdltgBmY3lTOFTgH4-SLzY/edit#gid=2047572279",
"COVID-19 Resources Citizen's Compiled Data- 2": "https://docs.google.com/spreadsheets/d"
"/1fHiBtzxBC_3Q7I5SXr_GpNA4ivT73w4W4hjK6IkGDBY/edit#gid"
"=1482732175",
"COVID-19 Resources Citizen's Compiled Data- 3": "https://shubhamkarjos.github.io/WebDev/Covid/Covid-Help"
"/main.html "
},
"COVID Helpline Number": "+911123978046",
},
"Andaman & Nicobar Islands": {
# "Links": {
#
# },
"COVID Helpline Number": "03192232102",
},
"Andhra Pradesh": {
"Links": {
"COVID-19 AP": "http://dashboard.covid19.ap.gov.in/ims/hospbed_reports/",
},
"COVID Helpline Number": "08662410978",
},
"Arunachal Pradesh": {
# "Links": {
#
# },
"COVID Helpline Number": "9436055743",
},
"Assam": {
# "Links": {
#
# },
"COVID Helpline Number": "6913347770",
},
"Bihar": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Chandigarh": {
# "Links": {
#
# },
"COVID Helpline Number": "9779558282",
},
"Chhattisgarh": {
"Links": {
"COVID-19 Chattisgarh": "https://cg.nic.in/health/covid19/RTPBedAvailable.aspx",
},
"COVID Helpline Number": "07712235091, 104",
},
"Dadra & Nagar Haveli & Daman & Diu": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Delhi": {
"Links": {
"COVID-19 Delhi": "https://coronabeds.jantasamvad.org/beds.html",
},
"COVID Helpline Number": "01122307145",
},
"Goa": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Gujarat": {
"Links": {
"COVID-19 GandhiNagar": "https://vmc.gov.in/HospitalModuleGMC/BedDetails.aspx?HOSP_ID=HOS00041",
"COVID-19 Vadodara": "https://vmc.gov.in/Covid19VadodaraApp/HospitalBedsDetails.aspx?tid=1",
"COVID-19 Resources Citizen's Compiled Data- 1": "https://docs.google.com/spreadsheets/d"
"/1ZyrYsowjk6PdC9N5yKBxMslI7FypoeIqDvlAYrqprL8/edit#gid=0 "
},
"COVID Helpline Number": "104",
},
"Haryana": {
"Links": {
"COVID-19 Haryana": "https://coronaharyana.in/",
},
"COVID Helpline Number": "8558893911",
},
"Himachal Pradesh": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Jammu & Kashmir": {
# "Links": {
#
# },
"COVID Helpline Number": "01912520982",
},
"Jharkhand": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Karnataka": {
"Links": {
"COVID-19 Bangalore": "https://docs.google.com/spreadsheets/u/1/d/e/2PACX-1vS-ipQLaCHZ8id4t4_NHf1FM4vQmBGQrGHAPFzNzJeuuGKsY_It6Tdb0Un_bC9gmig5G2dVxlXHoaEp/pubhtml?gid=1381543057&single=true",
},
"COVID Helpline Number": "104",
},
"Kerala": {
# "Links": {
#
# },
"COVID Helpline Number": "04712552056",
},
"Ladakh": {
# "Links": {
#
# },
"COVID Helpline Number": "01982256462",
},
"Lakshadweep": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Madhya Pradesh": {
# "Links": {
#
# },
"COVID Helpline Number": "07552527177",
},
"Maharashtra": {
"Links": {
"COVID-19 Nagpur": "http://nsscdcl.org/covidbeds/AvailableHospitals.jsp",
"COVID-19 Panvel": "https://covidbedpanvel.in/HospitalInfo/showindex",
"COVID-19 Pune": "https://covidpune.com/",
"COVID-19 UlhasNagar": "https://umccovidbed.in/HospitalInfo/showindex",
"COVID-19 Thane": "https://covidbedthane.in/HospitalInfo/showindex",
},
"COVID Helpline Number": "02026127394",
},
"Manipur": {
# "Links": {
#
# },
"COVID Helpline Number": "3852411668",
},
"Meghalaya": {
# "Links": {
#
# },
"COVID Helpline Number": "108",
},
"Mizoram": {
# "Links": {
#
# },
"COVID Helpline Number": "102",
},
"Nagaland": {
# "Links": {
#
# },
"COVID Helpline Number": "7005539653",
},
"Odisha (Orissa)": {
# "Links": {
#
# },
"COVID Helpline Number": "9439994859",
},
"Puducherry (Pondicherry)": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Punjab": {
"Links": {
"COVID-19 Ludhiana": "http://hbmsludhiana.in/index_app_detail.php?type=all",
}
},
"Rajasthan": {
"Links": {
"COVID-19 Rajasthan": "https://covidinfo.rajasthan.gov.in/covid-isolation-hospital.aspx",
},
"COVID Helpline Number": "01412225624",
},
"Sikkim": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Tamil Nadu": {
"Links": {
"COVID-19 TN": "https://stopcorona.tn.gov.in/beds.php",
},
"COVID Helpline Number": "04429510500",
},
"Telangana": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Tripura": {
# "Links": {
#
# },
"COVID Helpline Number": "03812315879",
},
"Uttarakhand (Uttaranchal)": {
# "Links": {
#
# },
"COVID Helpline Number": "104",
},
"Uttar Pradesh": {
"Links": {
"COVID-19 Lucknow": "https://docs.google.com/spreadsheets/d/1roxOi2_Uw4YBzLd5s8vC8cp6lbuM9016tWeWTcx2q5Y"
"/edit#gid=0 "
},
"COVID Helpline Number": "18001805145",
},
"West Bengal": {
# "Links": {
#
# },
"COVID Helpline Number": "3323412600",
},
}
select_state = st.sidebar.selectbox("", list(states_imp_links.keys()))
st.write(states_imp_links[select_state])
st.sidebar.subheader("Offering or Required Assistance? ")
person_kind = st.sidebar.selectbox("", ["Please Select", "Providing Help!", "Need Your Help!"])
# 2. PROVIDING HELP
if person_kind == "Providing Help!":
st.write("------------")
st.write("Thank you for being a potential life saver.")
st.write("Please provide correct information to the best of your knowledge.")
st.write("")
st.subheader("Volunteer for or Add a Lead:")
requirement = st.selectbox("", ["Please Select", "Ambulance Services", "Child Care", "Home Visit",
"Hospital Beds", "Medicine", "Oxygen Cylinders", "Plasma", "Others"])
# 2.1 PROVIDING HELP: AMBULANCE SERVICES
if requirement == "Ambulance Services":
contact_person = st.text_input('Contact Person: ')
st.subheader("Contact Number: Format: 9876543210, PLEASE DO NOT PREFIX +91.")
contact_information = st.text_input('Contact Number: ')
st.write("---")
st.subheader("Provide Pickup Location: ")
pickup_location_state, pickup_location_district, pickup_location_city = state_data(key="provider_pickup")
st.write("---")
st.subheader("Provide Drop Location: ")
drop_location_state, drop_location_district, drop_location_city = state_data(key="provider_drop")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_drop")
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information."
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number",
"Pickup: State", "Pickup: District", "Pickup: City",
"Drop: State", "Drop: District", "Drop: City", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"Pickup: State": pickup_location_state,
"Pickup: District": pickup_location_district,
"Pickup: City": pickup_location_city,
"Drop: State": drop_location_state,
"Drop: District": drop_location_district,
"Drop: City": drop_location_city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./ambulance_service_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.2 PROVIDING HELP: CHILD CARE
elif requirement == "Child Care":
contact_person = st.text_input('Contact Person: ')
contact_information = st.text_input('Contact Number: ')
state, district, city = state_data("provider_child_care")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_child_care")
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information."
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number",
"State", "District", "City", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"State": state,
"District": district,
"City": city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./child_care_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.3 PROVIDING HELP: HOME VISIT
elif requirement == "Home Visit":
contact_person = st.text_input('Contact Person: ')
contact_information = st.text_input('Enter Contact Number: ')
state, district, city = state_data("provider_home_visit")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_home_visit")
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information."
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number",
"State", "District", "City", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"State": state,
"District": district,
"City": city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./home_visit_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.4 PROVIDING HELP: HOSPITAL BEDS
elif requirement == "Hospital Beds":
contact_person = st.text_input('Contact Name or Doctor Name: ')
contact_information = st.text_input('Mobile Number: ')
hospital_name = st.text_input('Hospital Name: ')
hospital_address = st.text_input('Hospital Address: ')
state, district, city = state_data("provider_hospital_beds")
total_bed_count = st.text_input("Total Bed Count: ")
oxygen_bed_count = st.text_input("Oxygen Bed Count: ")
icu_bed_count = st.text_input("ICU Bed Count: ")
ventilator_bed_count = st.text_input("Ventilator Bed Count: ")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_hospital_beds")
if submit_info:
field_names = ["Contact Person", "Contact Mobile Number", "Hospital Name", "Hospital Address",
"State", "District", "City", "Total Bed Count",
"Oxygen Bed Count", "ICU Bed Count", "Ventilator Bed Count", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"Hospital Name": hospital_name,
"Hospital Address": hospital_address,
"State": state,
"District": district,
"City": city,
"Total Bed Count": total_bed_count,
"Oxygen Bed Count": oxygen_bed_count,
"ICU Bed Count": icu_bed_count,
"Ventilator Bed Count": ventilator_bed_count,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./hospital_bed_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.5 PROVIDING HELP: MEDICINES
elif requirement == "Medicine":
contact_person = st.text_input('Distributor / Retailer Name: ')
medicine_name = st.text_input('Medicine Name: ')
state, district, city = state_data(key="provider_medicine")
address = st.text_input('Distributor / Retailer Address: ')
contact_information = st.text_input('Contact Mobile Number: ')
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not contact_person or not address:
st.write("Please provide the necessary information."
" Distributor / Retailer Name and Address Info is necessary!")
else:
field_names = ["Distributor Name", "Medicine Name",
"State", "District", "City", "Address", "Contact Number", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Distributor Name": contact_person,
"Medicine Name": medicine_name,
"State": state,
"District": district,
"City": city,
"Address": address,
"Contact Number": contact_information,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./medicines_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.6 PROVIDING HELP: OXYGEN CYLINDERS
elif requirement == "Oxygen Cylinders":
contact_person = st.text_input('Contact Name: ')
contact_information = st.text_input('Contact Mobile Number: ')
just_refill = st.selectbox("Just refilling?", ["Yes", "No"])
start_timings = st.time_input('Start Timing: ')
end_timings = st.time_input('End Timing: ')
availability_for = st.selectbox('Availability for', ["Home", "Hospitals", "Home & Hospitals"])
address = st.text_input('Address: ')
state, district, city = state_data(key="provider_oxygen_cylinders")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not contact_person or not contact_information:
st.write("Please provide the necessary information so that we can help together!"
" Contact Name & Mobile Number Info is necessary!")
else:
field_names = ["Contact Person", "Contact Mobile Number", "Just Refill", "Start Timings", "End Timings",
"Availability for", "Address", "State", "District", "City", "Verified",
"Additional Notes", "Created Time", "Updated Time"]
dict_data = {"Contact Person": contact_person,
"Contact Mobile Number": contact_information,
"Just Refill": just_refill,
"Start Timings": start_timings,
"End Timings": end_timings,
"Availability for": availability_for,
"Address": address,
"State": state,
"District": district,
"City": city,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./oxygen_cylinders_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.7 PROVIDING HELP: PLASMA
elif requirement == "Plasma":
contact_person = st.text_input('Donor Name: ')
contact_information = st.text_input('Donor Contact Number: ')
contact_age = st.text_input("Donor Age: ")
blood_group = st.selectbox('Patient Blood Group: ', ['Please Select', 'A+', 'A-', 'B+', 'B-', 'AB+',
'AB-', 'O+', 'O-', 'Bombay Blood Group'])
recovered_date = st.date_input('Enter the date of recovery: ')
state, district, city = state_data(key="provider_plasma")
donated_before = st.selectbox("Have you donated it before?", ["Yes", "No"])
if donated_before == "Yes":
last_donated_date = st.date_input("Last Donated Date: ")
else:
last_donated_date = ""
antibodies_test = st.selectbox("Tested for antibodies yet?", ["Yes", "No"])
medical_issues = st.text_input("Any chronic disease such as high B.P., Diabetes etc.: ")
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
additional_notes = st.text_input('Additional Notes: ')
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_plasma")
if submit_info:
if not contact_person or not contact_information or not blood_group or not contact_age \
or not recovered_date or not donated_before:
st.write("Please provide the necessary information so that we can help together!"
" Donor Name, Mobile Number, Age, Blood Group, Recovered Date, "
"and Donated Before Info is necessary!")
else:
field_names = ["Donor Name", "Donor Contact Number",
"Donor Age", "Donor Blood Group", "Recovered Date",
"State", "District", "City", "Donated Before", "Last Donated Date",
"Antibodies Test", "Medical Issues", "Verified", "Additional Notes",
"Created Time", "Updated Time"]
dict_data = {"Donor Name": contact_person,
"Donor Contact Number": contact_information,
"Donor Age": contact_age,
"Donor Blood Group": blood_group,
"Recovered Date": recovered_date,
"State": state,
"District": district,
"City": city,
"Donated Before": donated_before,
"Last Donated Date": last_donated_date,
"Antibodies Test": antibodies_test,
"Medical Issues": medical_issues,
"Verified": verified_lead,
"Additional Notes": additional_notes,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./plasma_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 2.8 PROVIDING HELP: OTHERS
elif requirement == "Others":
text = st.text_input('Write others: ')
verified_lead = st.selectbox("Verified: ", ["Yes", "No"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!', key="provider_others")
if submit_info:
field_names = ["Text", "Verified",
"Created Time", "Updated Time"]
dict_data = {"Text": text,
"Verified": verified_lead,
"Created Time": created_time,
"Updated Time": updated_time}
with open('./others_service_provider.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Thank you for being a helping hand at this time:)')
# 3. NEED ASSISTANCE
elif person_kind == "Need Your Help!":
st.write("------------")
st.write("I'm trying my best to keep the webpage updated. Kindly please share with others so more data and "
"verified leads can be collected and the resources be made available to the needful people.")
requirement = st.selectbox("Need List", ["Ambulance Services", "Child Care", "Home Visit",
"Hospital Beds", "Medicine", "Oxygen Cylinders", "Plasma", "Others"])
# 3.1 ASSISTANCE: AMBULANCE SERVICES / HOSPITAL BED / PLASMA
if requirement == "Ambulance Services" or requirement == "Hospital Beds" or requirement == "Plasma" \
or requirement == "Oxygen Cylinders":
patient_name = st.text_input('Patient Name: ')
contact_information = st.text_input('Patient Mobile Number: ')
patient_age = st.text_input('Patient Age: ')
patient_sex = st.selectbox('Patient Sex: ', ['Male', 'Female', 'Transgender'])
patient_condition = st.selectbox('Patient Condition: ', ['Stable', 'SOS'])
assistance_for = st.selectbox("Assistance For: ", ["Ambulance Services", "Hospital Beds", "Oxygen Cylinder",
"Oxygen Cylinder Refill", "Plasma"])
if assistance_for == "Ambulance Services":
facilities = st.selectbox("Facilities: ", ["Normal", "Oxygen without AC", "Oxygen with AC", "Ventilator"])
else:
facilities = ""
patient_blood_group = st.selectbox('Patient Blood Group: ', ['Please Select', 'A+', 'A-', 'B+', 'B-', 'AB+',
'AB-', 'O+', 'O-', 'Bombay Blood Group'])
if assistance_for == "Hospital Beds":
bed_type = st.selectbox("Bed Type: ", ["Without Oxygen", "With Oxygen", "Ventilator Bed"])
else:
bed_type = ""
patient_oxygen_level = st.text_input("Patient Oxygen Level")
state, district, city = state_data(key="assist_ambulance")
address = st.text_input('Patient Address: ')
additional_notes = st.text_input('Additional Notes: ')
status = st.selectbox("Status", ["", "Resolved"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not patient_name or not contact_information or not assistance_for:
st.write("Please provide the necessary information."
" Patient Name, Mobile Number and Assistance For Info is necessary!")
else:
field_names = ["Patient Name", "Patient Mobile Number", "Patient Age", "Patient Sex", "Patient Condition",
"Assistance For", "Facilities", "Bed Type", "Patient Blood Group", "Patient Oxygen Level",
"State", "District", "City", "Address", "Additional Notes", "Status",
"Created Time", "Updated Time"]
dict_data = {"Patient Name": patient_name,
"Patient Mobile Number": contact_information,
"Patient Age": patient_age,
"Patient Sex": patient_sex,
"Patient Condition": patient_condition,
"Assistance For": assistance_for,
"Facilities": facilities,
"Bed Type": bed_type,
"Patient Blood Group": patient_blood_group,
"Patient Oxygen Level": patient_oxygen_level,
"State": state,
"District": district,
"City": city,
"Address": address,
"Additional Notes": additional_notes,
"Status": status,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./critical_assistance.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Please keep rechecking the page:)')
# 3.2 ASSISTANCE: MEDICINE
elif requirement == "Medicine":
state = list(state_data(key="medicine_assistance"))
df = pd.read_csv("./medicines_provider.csv")
state_retailers_data = df[df["State"] == state[0]]
st.table(state_retailers_data)
# for iterate in range(retailers_count[0]):
# retailer_data = state_retailers_data.iloc[iterate, :]
# data_to_df = pd.DataFrame(retailer_data, columns=[0])
# retailer_info = data_to_df.dropna()
# st.write(retailer_info)
# 3.3 ASSISTANCE: HOME VISIT / CHILD CARE
elif requirement == "Home Visit" or requirement == "Child Care":
contact_person = st.text_input('Patient Name: ')
contact_information = st.text_input('Patient Mobile Number: ')
patient_age = st.text_input('Patient Age: ')
patient_sex = st.selectbox('Patient Sex: ', ['Male', 'Female', 'Transgender'])
patient_condition = st.selectbox('Patient Condition: ', ['Stable', 'SOS'])
assistance_for = st.selectbox("Assistance For: ", ["Home Visit", "Child Care"])
state, district, city = state_data(key="assist_home_visit")
address = st.text_input('Patient Address: ')
additional_notes = st.text_input('Additional Notes: ')
status = st.selectbox("Status", ["", "Resolved"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
if not contact_person or not contact_information or not assistance_for:
st.write("Please provide the necessary information."
" Patient Name, Mobile Number and Assistance For Info is necessary!")
else:
field_names = ["Patient Name", "Patient Mobile Number", "Patient Age", "Patient Sex", "Patient Condition",
"Assistance For", "State", "District", "City", "Address", "Additional Notes", "Status",
"Created Time", "Updated Time"]
dict_data = {"Patient Name": contact_person,
"Patient Mobile Number": contact_information,
"Patient Age": patient_age,
"Patient Sex": patient_sex,
"Patient Condition": patient_condition,
"Assistance For": assistance_for,
"State": state,
"District": district,
"City": city,
"Address": address,
"Additional Notes": additional_notes,
"Status": status,
"Created Time": created_time,
"Updated Time": updated_time
}
with open('./home_personal_assistance.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Please keep rechecking the page:)')
# 3.4 ASSISTANCE: OTHERS
elif requirement == "Others":
text = st.text_input('Write others: ')
status = st.selectbox("Status", ["", "Resolved"])
created_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
updated_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
field_names = ["Text", "Status"
"Created Time", "Updated Time"]
dict_data = {"Text": text,
"Status": status,
"Created Time": created_time,
"Updated Time": updated_time}
with open('./others_service_assistance.csv', 'a') as f_object:
dictwriter_object = DictWriter(f_object, fieldnames=field_names)
dictwriter_object.writerow(dict_data)
f_object.close()
st.success('Information saved successfully. Please keep rechecking the page:)')
# 4. UPDATE THE DATA
st.sidebar.subheader("Update the Data!")
data_type = st.sidebar.selectbox("", ["Please Select",
"Need Assistance: Ambulance Services", "Providing Assistance: Ambulance Services",
"Need Assistance: Child Care", "Providing Assistance: Child Care",
"Need Assistance: Home Visit", "Providing Assistance: Home Visit",
"Need Assistance: Hospital Beds", "Providing Assistance: Hospital Beds",
"Providing Assistance: Medicine",
"Need Assistance: Oxygen Cylinders", "Providing Assistance: Oxygen Cylinders",
"Need Assistance: Plasma", "Providing Assistance: Plasma",
])
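# Each update branch below locates the matching CSV row by its identifying columns, overwrites the
# selected field, refreshes "Updated Time", and writes the file back with DataFrame.to_csv().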
# 4.1 UPDATE: NEED ASSISTANCE: HOME VISIT AND CHILD CARE
if data_type == "Need Assistance: Home Visit" or data_type == "Need Assistance: Child Care":
df = pd.read_csv("./home_personal_assistance.csv")
patient_name = st.text_input('Patient Name: ')
patient_mobile_number = st.text_input('Patient Mobile Number: ')
assistance_for = st.selectbox("Assistance For: ", ["Home Visit", "Child Care"])
if patient_name and patient_mobile_number and assistance_for:
patient_mobile_number = int(patient_mobile_number)
field_names = ["Patient Name", "Patient Mobile Number", "Patient Age", "Patient Sex", "Patient Condition",
"Assistance For", "State", "District", "City", "Address", "Additional Notes", "Status"]
select_field = st.selectbox("Select Info to be updated:", field_names)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Patient Name"] == patient_name) & (df["Patient Mobile Number"] == patient_mobile_number)
& (df["Assistance For"] == assistance_for), select_field] = updated_data
df.loc[(df["Patient Name"] == patient_name) & (df["Patient Mobile Number"] == patient_mobile_number)
& (df["Assistance For"] == assistance_for), "Updated Time"] = datetime.now().strftime(
"%d/%m/%Y %H:%M:%S")
st.subheader("Verify the details below before submitting the updated data:")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./home_personal_assistance.csv", index=False)
st.success('Information updated successfully. Please keep rechecking the page:)')
# 4.2 UPDATE: NEED ASSISTANCE: AMBULANCE SERVICES, HOSPITAL BEDS, PLASMA, OXYGEN CYLINDERS
elif data_type == "Need Assistance: Ambulance Services" or data_type == "Need Assistance: Hospital Beds" or \
data_type == "Need Assistance: Plasma" or data_type == "Need Assistance: Oxygen Cylinders":
df = pd.read_csv("./critical_assistance.csv")
patient_name = st.text_input('Patient Name: ', key="Patient Name")
contact_information = st.text_input('Patient Mobile Number: ', key="Contact Info")
assistance_option = ["Ambulance Services", "Hospital Beds", "Oxygen Cylinder",
"Oxygen Cylinder Refill", "Plasma"]
assistance = st.selectbox('Assistance For', options=assistance_option, key="Patient Assistance")
if patient_name and contact_information and assistance:
contact_information = int(contact_information)
fields = ["Patient Name", "Patient Mobile Number", "Patient Age", "Patient Sex", "Patient Condition",
"Assistance For", "Facilities", "Bed Type", "Patient Blood Group", "Patient Oxygen Level",
"State", "District", "City", "Address", "Additional Notes", "Status"]
select_field = st.selectbox("Select Info to be updated:", fields)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Patient Name"] == patient_name) & (df["Patient Mobile Number"] == contact_information)
& (df["Assistance For"] == assistance), select_field] = updated_data
df.loc[(df["Patient Name"] == patient_name) & (df["Patient Mobile Number"] == contact_information)
& (df["Assistance For"] == assistance), "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./critical_assistance.csv", index=False)
st.success('Information updated successfully. Please keep rechecking the page:)')
# 4.3 UPDATE: VOLUNTEER: CHILD CARE
elif data_type == "Providing Assistance: Child Care":
df = pd.read_csv("./child_care_provider.csv")
contact_person = st.text_input('Enter Contact Name: ')
contact_information = st.text_input('Contact Number: ')
if contact_person and contact_information:
contact_information = int(contact_information)
fields = ["Contact Person", "Contact Mobile Number",
"State", "District", "City", "Verified", "Additional Notes",
]
select_field = st.selectbox("Select Info to be updated:", fields)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, select_field] = updated_data
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./child_care_provider.csv", index=False)
st.success('Information updated successfully. :)')
# 4.4 UPDATE: VOLUNTEER: AMBULANCE SERVICE PROVIDER
elif data_type == "Providing Assistance: Ambulance Services":
df = pd.read_csv("./ambulance_service_provider.csv")
contact_person = st.text_input('Enter Contact Name: ')
contact_information = st.text_input('Contact Mobile Number: ')
if contact_person and contact_information:
contact_information = int(contact_information)
field_names = ["Contact Person", "Contact Mobile Number",
"Pickup: State", "Pickup: District", "Pickup: City",
"Drop: State", "Drop: District", "Drop: City", "Verified", "Additional Notes",
]
select_field = st.selectbox("Select Info to be updated:", field_names)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, select_field] = updated_data
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./ambulance_service_provider.csv", index=False)
st.success('Information updated successfully. :)')
# 4.5 UPDATE: VOLUNTEER: HOSPITAL BED PROVIDER
elif data_type == "Providing Assistance: Hospital Beds":
df = pd.read_csv("./hospital_bed_provider.csv")
hospital_name = st.text_input('Hospital Name: ')
hospital_address = st.text_input('Hospital Address: ')
if hospital_name and hospital_address:
field_names = ["Contact Person", "Contact Mobile Number", "Hospital Name", "Hospital Address",
"State", "District", "City", "Total Bed Count",
"Oxygen Bed Count", "ICU Bed Count", "Ventilator Bed Count", "Verified", "Additional Notes",
]
select_field = st.selectbox("Select Info to be updated:", field_names)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Hospital Name"] == hospital_name) & (df["Hospital Address"] == hospital_address)
, select_field] = updated_data
df.loc[(df["Hospital Name"] == hospital_name) & (df["Hospital Address"] == hospital_address)
, "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./hospital_bed_provider.csv")
st.success('Information updated successfully. :)')
st.write("----")
# 4.6 UPDATE: VOLUNTEER: MEDICINE PROVIDER
elif data_type == "Providing Assistance: Medicine":
df = pd.read_csv("./medicines_provider.csv")
contact_person = st.text_input('Distributor / Retailer Name: ')
address = st.text_input('Distributor / Retailer Address:')
if contact_person and address:
field_names = ["Distributor Name", "Medicine Name",
"State", "District", "City", "Address", "Contact Number", "Verified", "Additional Notes",
]
select_field = st.selectbox("Select Info to be updated:", field_names)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Distributor Name"] == contact_person) & (df["Address"] == address)
, select_field] = updated_data
df.loc[(df["Distributor Name"] == contact_person) & (df["Address"] == address)
, "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./medicines_provider.csv", index=False)
st.success('Information updated successfully. :)')
# 4.7 UPDATE: VOLUNTEER: OXYGEN CYLINDERS PROVIDER
elif data_type == "Providing Assistance: Oxygen Cylinders":
df = pd.read_csv("./oxygen_cylinders_provider.csv")
contact_person = st.text_input('Contact Name: ')
contact_information = st.text_input('Contact Mobile Number:')
if contact_person and contact_information:
field_names = ["Contact Person", "Contact Mobile Number", "Just Refill", "Start Timings", "End Timings",
"Availability for", "Address", "State", "District", "City", "Verified",
"Additional Notes"]
select_field = st.selectbox("Select Info to be updated:", field_names)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, select_field] = updated_data
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./oxygen_cylinders_provider.csv", index=False)
st.success('Information updated successfully. :)')
# 4.8 UPDATE: VOLUNTEER: PLASMA PROVIDER
elif data_type == "Providing Assistance: Plasma":
df = pd.read_csv("./plasma_provider.csv")
contact_person = st.text_input('Donor Name: ')
contact_information = st.text_input('Donor Contact Number: ')
if contact_person and contact_information:
field_names = ["Donor Name", "Donor Contact Number",
"Donor Age", "Donor Blood Group", "Recovered Date",
"State", "District", "City", "Donated Before", "Last Donated Date",
"Antibodies Test", "Medical Issues", "Verified", "Additional Notes",
]
select_field = st.selectbox("Select Info to be updated:", field_names)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Donor Name"] == contact_person) & (df["Donor Contact Number"] == contact_information)
, select_field] = updated_data
df.loc[(df["Donor Name"] == contact_person) & (df["Donor Contact Number"] == contact_information)
, "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./plasma_provider.csv", index=False)
st.success('Information updated successfully. :)')
# 4.9 UPDATE: VOLUNTEER: HOME VISIT PROVIDER
elif data_type == "Providing Assistance: Home Visit":
df = pd.read_csv("./home_visit_provider.csv")
contact_person = st.text_input('Contact Person: ')
contact_information = st.text_input('Enter Contact Number: ')
if contact_person and contact_information:
field_names = ["Contact Person", "Contact Mobile Number",
"State", "District", "City", "Verified", "Additional Notes",
]
select_field = st.selectbox("Select Info to be updated:", field_names)
updated_data = st.text_input("Updated Info: ")
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, select_field] = updated_data
df.loc[(df["Contact Person"] == contact_person) & (df["Contact Mobile Number"] == contact_information)
, "Updated Time"] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
submit_info = st.button('Submit the info!')
if submit_info:
df.to_csv("./home_visit_provider.csv", index=False)
st.success('Information updated successfully. :)')
# 5. TWITTER SEARCH FOR COVID-19
st.sidebar.subheader("Twitter Search for Covid-19")
twitter_search = st.sidebar.checkbox("Generate Link")
if twitter_search:
st.write("---")
st.subheader("City Name")
city = st.text_input("", "Ghaziabad")
st.subheader("Show Posts with #verified")
verified = st.selectbox("", ["Yes", "No"])
if verified == "Yes":
verified = "verified+"
else:
verified = ""
st.subheader("Specify the Need [Multi-Select Option]")
required = {
"Bed": "bed+OR+beds",
"Ventilator": "ventilator+OR+ventilators",
"Oxygen": "oxygen",
"ICU": "icu",
"Test": "testing+OR+test+tests",
"Remdesivir": "remdesivir",
"Dexamethasone": "dexamethasone",
"Tusq Dx": "tusq+OR+tusq+dx+OR+tusqdx",
"Favipiravir": "favipiravir",
"Tocilizumab": "tocilizumab",
"Fabiflu": "fabiflu",
"Plasma": "plasma",
"Food": "food+OR+tiffin+OR+lunch"
}
multi_check = st.multiselect('', list(required.keys()))
st.subheader("Additional Keyword, if any: ")
other_keyword = st.text_input("")
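# Build the Twitter live-search URL: OR-join the selected need keywords, append any extra keyword,
# and (unless the user opts in to including them) add a filter excluding "not verified" / "needed" style posts.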
link_1 = ""
for iterate, check in enumerate(multi_check):
if iterate == 0:
link_1 += required[check]
else:
link_1 += f"+{required[check]}"
st.subheader('Include the posts with #not-verified, #needed [BY DEFAULT- EXCLUDED]')
exclude_check = st.selectbox("", ["Select", "No"])
if exclude_check == "No":
link_2 = ''
else:
link_2 = "+-%22not+verified+-%22needed+-%22unverified+-%22required+-%22require+-%22nneed+-%22requirement"
link_3 = f"+{other_keyword}"
link = f"https://twitter.com/search?q={verified}{city}+%28{link_1}{link_3}%29{link_2}&f=live"
link_submit_info = st.button('Generate and Copy the Twitter link here!', key="link_generator")
if link_submit_info:
copy_link =
|
pd.DataFrame([link])
|
pandas.DataFrame
|
# @Date: 2019-08-16T23:31:03+08:00
# @Email: <EMAIL>
# @Filename: MMCIF_unit.py
# @Last modified time: 2019-08-21T16:02:36+08:00
import pandas as pd
import numpy as np
import os, re, time, requests, sys
from urllib import request, error
from retrying import retry
from multiprocessing.dummy import Pool
from bs4 import BeautifulSoup
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
sys.path.append('./')
from Unit import Unit
class MMCIF_unit(Unit):
CONFIG = {
'PDB_ID': 'pdb_id',
'MMCIF_OLD_FOLDER': ['/data1/suntt/process0606/cgc_mmcif_file/', '/data1/suntt/CanDriver/Data/PDB_cgc/cgc_mmcif_file/', '/data1/suntt/CanDriver/Data/PDB_NEW/mmcif_file/'],
'MMCIF_FOLDER': '/home/zzf/Work/SIFTS_Plus_Muta_Maps/data/mmcif_file/',
'OUTPUT_FOLDER': '../../data/Mapping_Pipeline/output_files/',
'HEADERS': {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134'},
'CHAIN_TYPE_FILE_LIST': ('cgc_pdb_chain_type.txt', 'cgc_pdb_chain_type_extra', 'cgc_chain_type_error_pdb.txt', 'output/cgc_pdb_chain_type_all_new.txt'),
'ATOM_SEQRES_FILE_LIST': ('cgc_pdb_atom_seqres.txt', 'cgc_seqres_atom_error_pdb.txt', 'output/cgc_pdb_atom_seqres_add_chain_type_new.txt', 'output/cgc_pdb_coordinates_site_range.txt'),
'LIGAND_FILE_LIST': ('ligand_info0605.txt', 'ligand_info_extra0605.txt', 'ligand_info_error.txt', 'output/ligand_info_final1.txt', 'output/ligand_info_final2.txt'),
'CHAIN_AND_SEQRES_FILE_LIST': ('output/cgc_pdb_atom_seqres_protein_chain_info.txt', 'cgc_protein_chain_id_in_pdb.txt', 'output/cgc_pdb_atom_seqres_info_integration.txt'),
'ADD_MODIFICATION_FILE': 'output/cgc_pdb_atom_seqres_info_integration_new.txt',
'ADD_MISSING_FILE': ('output/cgc_information_statistics1.txt', 'output/cgc_pdb_atom_seqres_info_integration_new_add_coor_site.txt'),
'PDB_MUTATION_FILE': 'output/pdb_mutation_info.txt',
'RESOLUTION_FILE': ('output/resoluton_error.txt', 'output/pdb_resoluton_info.txt'),
'YEAR_INFO_1_FILE': ('../../data/Mapping_Pipeline/output_files/pdb_date_info_newest.txt', '../../data/Mapping_Pipeline/output_files/pdb_date_error_newest.txt'),
'YEAR_INFO_2_FILE': ('../../data/Mapping_Pipeline/output_files/pdb_date_supp_newest.txt', '../../data/Mapping_Pipeline/output_files/pdb_date_error_supp_newest.txt'),
'YEAR_INFO_ALL': '../../data/Mapping_Pipeline/output_files/pdb_date_info_newest_all.txt',
'FINAL_FILE': ('output/cgc_pdb_atom_seqres_info_integration_final.txt', 'PDB_cgc/output/cgc_pdb_atom_seqres_info_integration_final0614.txt'),
'MMICF_USECOLS': ['pdb_id', 'chain_id', 'seqres_len', 'coordinates_len', 'Modification_position', 'ligand_position_in_seqres', 'mis_range', 'mis_index', 'Modification_num', 'mutation_num'],
}
def set_output_folder(self, path):
self.CONFIG['OUTPUT_FOLDER'] = path
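# Download a single mmCIF file from RCSB (https://files.rcsb.org) and write it to `path`;
# sleeps 2 s afterwards to throttle requests.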
def download_cif_file(pdbId, path):
url = 'https://files.rcsb.org/view/%s.cif' % pdbId
html = request.urlopen(url).read()
html = html.decode('utf-8')
with open(path, 'w') as fw:
fw.write(html)
time.sleep(2)
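# Return the path of an existing <pdbId>.cif, checking the legacy folders before MMCIF_FOLDER; when the
# file is missing it either downloads it and returns False (download=True) or returns the expected new path (download=False).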
def get_mmcif_file_path(self, pdbId, download=False):
print('get_mmcif_file_path(): Working on [%s]' % pdbId)
new_path = '%s%s.cif' % (self.CONFIG['MMCIF_FOLDER'], pdbId)
for path in self.CONFIG['MMCIF_OLD_FOLDER']:
old_path = '%s%s.cif' % (path, pdbId)
if os.path.exists(old_path):
return old_path
if os.path.exists(new_path):
return new_path
else:
if download:
MMCIF_unit.download_cif_file(pdbId, new_path)
return False
else:
return new_path
'''
def download_mmcif_file(self):
# Not used for now (zzf)
@retry(stop_max_attempt_number=3, wait_fixed=1000)
pool = Pool(processes=20)
pool.map(download_cif_file, self.pdb_list)
'''
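# Read _entity_poly.type / _entity_poly.pdbx_strand_id from each mmCIF, map the entity types to
# protein/DNA/RNA labels, expand comma-separated chain ids, and write the merged chain-type table.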
def extract_chain_type_info(self):
chain_type_file1, chain_type_file2, chain_type_error, chain_type_file_all = MMCIF_unit.CONFIG['CHAIN_TYPE_FILE_LIST']
outpath = self.CONFIG['OUTPUT_FOLDER']
demo_dict_df_list = []
fw = open(outpath + chain_type_file2, 'w')
error_pdb_file = open(outpath + chain_type_error, 'w')
for pdbId in self.pdb_list:
# pdbFileSavePath = '%s%s.cif' % (MMCIF_unit.CONFIG['MMCIF_FOLDER'], pdbId)
pdbFileSavePath = self.get_mmcif_file_path(pdbId, True)
if not pdbFileSavePath:
continue
mmcif_dict = MMCIF2Dict(pdbFileSavePath)
demo_dict = {}
index = ['_entity_poly.type', '_entity_poly.pdbx_strand_id']
try:
for i in index:
demo_dict[i] = mmcif_dict[i]
df = pd.DataFrame(demo_dict)
df['pdb_id'] = pdbId
demo_dict_df_list.append(df)
except:
try:
fw.write('%s\t%s\t%s\n' % (
pdbId, mmcif_dict['_entity_poly.type'], mmcif_dict['_entity_poly.pdbx_strand_id']))
except:
error_pdb_file.write(pdbId + '\n')
demo_df = pd.concat(demo_dict_df_list)
demo_df.to_csv(outpath + chain_type_file1, sep='\t', index=False)
fw.close()
error_pdb_file.close()
# Merge the chain_type information into a single table
info = pd.read_csv(outpath + chain_type_file1, sep='\t', dtype=str)
info1 = pd.read_csv(outpath + chain_type_file2, sep='\t', dtype=str,
names=['pdb_id', '_entity_poly.type', '_entity_poly.pdbx_strand_id'])
info2 = pd.concat([info, info1], axis=0)
info2.rename(columns={'_entity_poly.pdbx_strand_id': 'chain_id', '_entity_poly.type': 'chain_type_details'},
inplace=True)
info2['chain_type'] = info2['chain_type_details'].replace('polypeptide(L)', 'protein').replace('polypeptide(D)',
'protein').replace(
'polydeoxyribonucleotide', 'DNA').replace('polyribonucleotide', 'RNA').replace(
'polydeoxyribonucleotide/polyribonucleotide hybrid', 'RNA+DNA')
# info2.to_csv(outpath+'PDB_cgc/cgc_pdb_chain_type_all.txt',sep='\t',index=False)
# Reset the index so one label no longer maps to several rows; after concat each input keeps its original index
info2.index = range(len(info2))
# With the index reset there is no ambiguity, so the (faster) approach below can be used
result = info2.drop('chain_id', axis=1).join(
info2['chain_id'].str.split(',', expand=True).stack().reset_index(level=1, drop=True).rename('chain_id_new'))
info3 = result[['pdb_id', 'chain_type']].drop_duplicates()
info4 = info3.sort_values(by=['chain_type']).groupby(['pdb_id'], as_index=False).agg(lambda x: ','.join(x))
info4.rename(columns={'chain_type': 'pdb_contain_chain_type'}, inplace=True)
info5 = pd.merge(result, info4, on=['pdb_id'], how='left')
info5.to_csv(outpath + chain_type_file_all, sep='\t', index=False)
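# Extract per-residue _pdbx_poly_seq_scheme records (SEQRES vs. resolved coordinates), attach the chain-type
# info, and write both the residue-level table and, for protein-containing entries, the per-chain seqres positions with insertion codes.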
def extract_seqres_and_atom_info(self):
atom_seqres_file, atom_seqres_error, atom_seqres_chain_type_oringnal, coordinates_file = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST']
outpath = self.CONFIG['OUTPUT_FOLDER']
chain_type_file_all = MMCIF_unit.CONFIG['CHAIN_TYPE_FILE_LIST'][3]
demo_dict_df_list = []
error_pdb_file = open(outpath + atom_seqres_error, 'w')
for pdbId in self.pdb_list:
# pdbFileSavePath = '%s%s.cif' % (MMCIF_unit.CONFIG['MMCIF_FOLDER'], pdbId)
pdbFileSavePath = self.get_mmcif_file_path(pdbId)
if not pdbFileSavePath:
continue
mmcif_dict = MMCIF2Dict(pdbFileSavePath)
demo_dict = {}
index = ['_pdbx_poly_seq_scheme.mon_id', '_pdbx_poly_seq_scheme.ndb_seq_num',
'_pdbx_poly_seq_scheme.pdb_seq_num', '_pdbx_poly_seq_scheme.auth_seq_num',
'_pdbx_poly_seq_scheme.pdb_mon_id', '_pdbx_poly_seq_scheme.auth_mon_id',
'_pdbx_poly_seq_scheme.pdb_strand_id', '_pdbx_poly_seq_scheme.pdb_ins_code']
try:
for i in index:
demo_dict[i] = mmcif_dict[i]
df = pd.DataFrame(demo_dict)
df['pdb_id'] = pdbId
demo_dict_df_list.append(df)
except:
error_pdb_file.write(pdbId + '\n')
demo_df1 = pd.concat(demo_dict_df_list)
demo_df1.to_csv(outpath + atom_seqres_file, sep='\t', index=False)
error_pdb_file.close()
# Add the chain_type info to the seqres and atom records
file1 = pd.read_csv(outpath + atom_seqres_file, sep='\t', dtype=str)
file2 = pd.read_csv(outpath + chain_type_file_all, sep='\t', dtype=str)
file2.rename(columns={'chain_id_new': 'chain_id'}, inplace=True) # ?
file3 = pd.merge(file1, file2, left_on=['pdb_id', '_pdbx_poly_seq_scheme.pdb_strand_id'],
right_on=['pdb_id', 'chain_id'], how='left')
# file3.to_csv(outpath+'PDB_cgc/cgc_pdb_atom_seqres_add_chain_type.txt',sep='\t',index=False)
file3.rename(columns={'_pdbx_poly_seq_scheme.mon_id': 'SEQRES', '_pdbx_poly_seq_scheme.pdb_mon_id': 'Coordinates',
'_pdbx_poly_seq_scheme.ndb_seq_num': 'pdb_index',
'_pdbx_poly_seq_scheme.pdb_seq_num': 'position_in_seqres',
'_pdbx_poly_seq_scheme.auth_seq_num': 'position_in_coordinates',
'_pdbx_poly_seq_scheme.pdb_ins_code': 'inside_code'}, inplace=True)
file4 = file3.drop(['_pdbx_poly_seq_scheme.auth_mon_id', '_pdbx_poly_seq_scheme.pdb_strand_id'], axis=1)
file4.to_csv(outpath + atom_seqres_chain_type_oringnal, sep='\t', index=False)
# Add coordinates_start and coordinates_end information
coordinates_range = file4[file4['pdb_contain_chain_type'].notna() & file4['pdb_contain_chain_type'].str.contains('protein')]
coordinates_range['pdb_ins_position'] = coordinates_range['position_in_seqres'] + coordinates_range['inside_code']
coordinates_range['pdb_ins_position'] = coordinates_range['pdb_ins_position'].str.replace('.', '')
coordinates_range1 = coordinates_range.groupby(['pdb_id', 'chain_id'], as_index=False)['pdb_ins_position'].agg(
lambda x: ';'.join(x))
coordinates_range1.to_csv(outpath + coordinates_file, sep='\t', index=False)
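# Collect _struct_conn contacts, keep metal-coordination ('metalc') records involving common metal ions,
# map them to seqres positions via the residue table, and write per-contact and per-chain summaries.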
def extract_pdb_ligand_info(self):
outpath = self.CONFIG['OUTPUT_FOLDER']
ligand_file1, ligand_file2, ligand_file_error, ligand_file_final1, ligand_file_final2 = MMCIF_unit.CONFIG['LIGAND_FILE_LIST']
atom_seqres_chain_type_oringnal = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST'][2]
demo_dict_df_list = []
fw = open(outpath + ligand_file2,'w')
fp = open(outpath + ligand_file_error,'w')
for pdbId in self.pdb_list:
# pdbFileSavePath = '%s%s.cif' % (MMCIF_unit.CONFIG['MMCIF_FOLDER'], pdbId)
pdbFileSavePath = self.get_mmcif_file_path(pdbId)
if not pdbFileSavePath:
continue
mmcif_dict = MMCIF2Dict(pdbFileSavePath)
demo_dict = {}
index = ['_struct_conn.conn_type_id','_struct_conn.ptnr1_auth_asym_id','_struct_conn.ptnr1_auth_comp_id','_struct_conn.ptnr1_auth_seq_id',
'_struct_conn.ptnr2_auth_asym_id','_struct_conn.ptnr2_auth_comp_id','_struct_conn.ptnr2_auth_seq_id']
try:
for i in index:
demo_dict[i] = mmcif_dict[i]
df = pd.DataFrame(demo_dict)
df['pdb_id'] = pdbId
demo_dict_df_list.append(df)
except:
try:
fw.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n'%(pdbId,mmcif_dict['_struct_conn.conn_type_id'],mmcif_dict['_struct_conn.ptnr1_auth_asym_id'],mmcif_dict['_struct_conn.ptnr1_auth_comp_id'],
mmcif_dict['_struct_conn.ptnr1_auth_seq_id'],mmcif_dict['_struct_conn.ptnr2_auth_asym_id'],mmcif_dict['_struct_conn.ptnr2_auth_comp_id'],
mmcif_dict['_struct_conn.ptnr2_auth_seq_id']))
except:
fp.write(pdbId+'\n')
demo_df = pd.concat(demo_dict_df_list)
demo_df.to_csv(outpath + ligand_file1,sep='\t',index=False)
fw.close()
fp.close()
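# Helpers: count the ';'-separated ligand contact positions per chain, and flag whether a
# connection record is a metal coordination ('metalc') contact.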
def ligand_count(ligand_ptnr_seq_id):
a = len(ligand_ptnr_seq_id.split(';'))
return a
def metal_check(connection_type):
if pd.isnull(connection_type):
return '0'
else:
if connection_type == 'metalc':
return '1'
else:
return '0'
# atom_seqres_chain_type_oringnal = 'output/cgc_pdb_atom_seqres_add_chain_type_new.txt'
ligand_info1 = pd.read_csv(outpath + ligand_file1, sep='\t', dtype=str, keep_default_na=False)
ligand_info2 = pd.read_csv(outpath + ligand_file2, sep='\t', dtype=str, keep_default_na=False,
names=['pdb_id', '_struct_conn.conn_type_id', '_struct_conn.ptnr1_auth_asym_id',
'_struct_conn.ptnr1_auth_comp_id', '_struct_conn.ptnr1_auth_seq_id',
'_struct_conn.ptnr2_auth_asym_id', '_struct_conn.ptnr2_auth_comp_id',
'_struct_conn.ptnr2_auth_seq_id'])
ligand_info_all = pd.concat([ligand_info1, ligand_info2], axis=0)
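# Element symbols treated as metal ligands when filtering the 'metalc' connection records below.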
metal_ligand = ['ZN', 'MG', 'CA', 'FE', 'NA', 'MN', 'K', 'NI', 'CU', 'CO', 'CD', 'HG', 'PT', 'MO', 'BE', 'AL', 'BA',
'RU', 'SR', 'V', 'CS', 'W', 'AU', 'YB', 'LI', 'GD', 'PB', 'Y', 'TL', 'IR', 'RB', 'SM', 'AG',
'OS', 'PR', 'PD', 'EU', 'RH', 'RE', 'TB', 'TA', 'LU', 'HO', 'CR', 'GA', 'LA', 'SN', 'SB', 'CE',
'ZR',
'ER', 'TH', 'TI', 'IN', 'HF', 'SC', 'DY', 'BI', 'PA', 'PU', 'AM', 'CM', 'CF', 'GE', 'NB', 'TC',
'ND',
'PM', 'TM', 'PO', 'FR', 'RA', 'AC', 'NP', 'BK', 'ES', 'FM', 'MD', 'NO', 'LR', 'RF', 'DB', 'SG']
ligand_info_all1 = ligand_info_all[(ligand_info_all['_struct_conn.conn_type_id'] == 'metalc') & (
ligand_info_all['_struct_conn.ptnr1_auth_comp_id'].isin(metal_ligand))]
ligand_info_all1.rename(
columns={'_struct_conn.conn_type_id': 'connection_type', '_struct_conn.ptnr1_auth_asym_id': 'ligand_chain',
'_struct_conn.ptnr1_auth_comp_id': 'ligand_comp',
'_struct_conn.ptnr1_auth_seq_id': 'ligand_seq_id', '_struct_conn.ptnr2_auth_asym_id': 'chain_id',
'_struct_conn.ptnr2_auth_comp_id': 'ligand_ptnr_comp',
'_struct_conn.ptnr2_auth_seq_id': 'position_in_seqres'}, inplace=True)
ligand_info_all2 = ligand_info_all[(ligand_info_all['_struct_conn.conn_type_id'] == 'metalc') & (
ligand_info_all['_struct_conn.ptnr2_auth_comp_id'].isin(metal_ligand))]
ligand_info_all2.rename(
columns={'_struct_conn.conn_type_id': 'connection_type', '_struct_conn.ptnr2_auth_asym_id': 'ligand_chain',
'_struct_conn.ptnr2_auth_comp_id': 'ligand_comp',
'_struct_conn.ptnr2_auth_seq_id': 'ligand_seq_id', '_struct_conn.ptnr1_auth_asym_id': 'chain_id',
'_struct_conn.ptnr1_auth_comp_id': 'ligand_ptnr_comp',
'_struct_conn.ptnr1_auth_seq_id': 'position_in_seqres'}, inplace=True)
ligand_info_all3 = pd.concat([ligand_info_all1, ligand_info_all2], axis=0)
ligand_info_all3 = ligand_info_all3.reset_index(drop=True)
ligand_info_all3['ismetal'] = ligand_info_all3.apply(lambda x: metal_check(x.connection_type), axis=1)
ligand_info_all4 = ligand_info_all3.drop(['ligand_chain', 'ligand_seq_id'], axis=1)
ligand_info_all4 = ligand_info_all4.drop_duplicates()
data_index_position = pd.read_csv(outpath + atom_seqres_chain_type_oringnal, sep='\t', dtype=str)
data_index_position2 = data_index_position[
['pdb_id', 'chain_id', 'pdb_index', 'position_in_seqres']].drop_duplicates()
infomerge = pd.merge(ligand_info_all4, data_index_position2, how='left',
on=['pdb_id', 'chain_id', 'position_in_seqres'])
infomerge1 = infomerge[infomerge['pdb_index'].notna()]
infomerge1.to_csv(outpath + ligand_file_final1, sep='\t', index=False)
infomerge2 = infomerge1.groupby(['pdb_id', 'chain_id'], as_index=False)[
['ligand_comp', 'ligand_ptnr_comp', 'position_in_seqres', 'pdb_index', 'ismetal']].agg(lambda x: ';'.join(x))
infomerge2['ligand_count'] = infomerge2.apply(lambda x: ligand_count(x.position_in_seqres), axis=1)
infomerge2.to_csv(outpath + ligand_file_final2, sep='\t', index=False)
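# Collapse per-residue records into one row per chain (protein and non-protein chains handled separately),
# compute SEQRES/coordinate lengths, list each PDB's protein chain ids, and record modified-residue ('X') positions and counts.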
def deal_with_chain_and_seqres_atom(self):
def get_modification(m, k):
m1 = str(m).replace('?', '')
yes = [i + 1 for i, v in enumerate(m1) if v == 'X']
if yes != []:
length = int(len(yes))
if k == '1':
yes1 = str(yes).replace('[', '').replace(']', '').replace(' ','')
return yes1
elif k == '2':
return length
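        # illustrative example: get_modification('A?XAX', '1') -> '2,4' (1-based positions of
        # 'X' after removing '?'), get_modification('A?XAX', '2') -> 2 (modification count)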
def get_modification_seqres_index(m):
# print m
yes = [i + 1 for i, v in enumerate(m) if v == 'X']
if yes != []:
yes1 = str(yes).replace('[', '').replace(']', '').replace(' ','')
return yes1
atom_seqres_protein_chain, cgc_protein_chain_id, integration_file = MMCIF_unit.CONFIG['CHAIN_AND_SEQRES_FILE_LIST']
outpath = self.CONFIG['OUTPUT_FOLDER']
atom_seqres_chain_type_oringnal = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST'][2]
multiToOne = MMCIF_unit.MultiToOne()
f = pd.read_csv(outpath + atom_seqres_chain_type_oringnal, sep='\t', dtype=str)
f['SEQRES'] = f.apply(lambda x: multiToOne.multi_letter_convert_to_one_letter(x.SEQRES), axis=1)
f['Coordinates'] = f.apply(lambda x: multiToOne.multi_letter_convert_to_one_letter(x.Coordinates), axis=1)
        ## The following applies to protein chains only
f1 = f[f['chain_type'] == 'protein']# .reset_index(drop=True)
f1.to_csv(outpath + atom_seqres_protein_chain, sep='\t', index=False)
        # put the SEQRES information for each chain on a single line
        f2 = f1[['pdb_id', 'chain_id', 'SEQRES', 'inside_code']]  # do not deduplicate here
f3 = f2.groupby(['pdb_id', 'chain_id'], as_index=False)['SEQRES'].agg(lambda x: ''.join(x))
f3['seqres_len'] = f3['SEQRES'].str.len()
        # put the coordinates information for each chain on a single line
        f4 = f1[['pdb_id', 'chain_id', 'Coordinates', 'inside_code']]  # do not deduplicate here
f5 = f4.groupby(['pdb_id', 'chain_id'], as_index=False)['Coordinates'].agg(lambda x: ''.join(x))
f5['coordinates_len'] = f5['Coordinates'].str.replace('?', '').str.len()
        # merge the two parts
f6 = pd.merge(f3, f5, on=['pdb_id', 'chain_id'], how='left')
        # collect the chain_id of every protein chain
allchain = f6[['pdb_id', 'chain_id']].drop_duplicates()
allchain1 = allchain.sort_values(by=['chain_id']).groupby(['pdb_id'], as_index=False).agg(lambda x: ','.join(x))
allchain1.rename(columns={'chain_id': 'pdb_protein_chain_id'}, inplace=True)
allchain1.to_csv(outpath + cgc_protein_chain_id, sep='\t', index=False)
        ## The following applies to non-protein chains
ff1 = f[f['chain_type'] != 'protein']# .reset_index(drop=True)
if len(ff1) != 0:
            ff2 = ff1[['pdb_id', 'chain_id', 'SEQRES', 'inside_code']]  # do not deduplicate here
ff3 = ff2.groupby(['pdb_id', 'chain_id'], as_index=False)['SEQRES'].agg(lambda x: ''.join(x))
ff3['seqres_len'] = ff3['SEQRES'].str.replace('D', '').str.len()
            # put the coordinates information for each chain on a single line
            ff4 = ff1[['pdb_id', 'chain_id', 'Coordinates', 'inside_code']]  # do not deduplicate here
ff5 = ff4.groupby(['pdb_id', 'chain_id'], as_index=False)['Coordinates'].agg(lambda x: ''.join(x))
ff5['coordinates_len'] = ff5['Coordinates'].str.replace('D', '').str.replace('?', '').str.len()
            # merge the two parts
ff6 = pd.merge(ff3, ff5, on=['pdb_id', 'chain_id'], how='left')
ff6['Coordinates'] = ff6['Coordinates'].str.replace('D', '')
full = pd.concat([f6, ff6], axis=0)
else:
full = f6
        # add the modification information to the file
full['Modification_position'] = full.apply(lambda x: get_modification(x.Coordinates, '1'), axis=1)
full['Modification_num'] = full.apply(lambda x: get_modification(x.Coordinates, '2'), axis=1)
full['Modification_position_seqres_index'] = full.apply(lambda x: get_modification_seqres_index(x.Coordinates),axis=1)
full.to_csv(outpath + integration_file, sep='\t', index=False)
def add_modification_pdb_type_to_integraton_file(self):
def modification_type(Modification_position,coordinates_len):
if pd.isnull(Modification_position):
modification_site='no_modification'
else:
modification_site=[]
modification_list = str(Modification_position).split(',')
for i in modification_list:
                    # determine the modification type; i is cast to int
if int(i) <= 5:
modify='start'
elif int(i) >= int(coordinates_len)-5:
modify='end'
else:
modify='middle'
                    # only append the type if it has not been seen yet
if modify not in modification_site:
modification_site.append(modify)
modification_site = ','.join(modification_site)
return modification_site
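        # illustrative example: modification_type('3,120', 200) -> 'start,middle'
        # (position <= 5 counts as 'start', position >= length - 5 as 'end', otherwise 'middle')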
outpath = self.CONFIG['OUTPUT_FOLDER']
integration_file = MMCIF_unit.CONFIG['CHAIN_AND_SEQRES_FILE_LIST'][-1]
chain_type_file_all = MMCIF_unit.CONFIG['CHAIN_TYPE_FILE_LIST'][-1]
integration_file_new = MMCIF_unit.CONFIG['ADD_MODIFICATION_FILE']
        # add modification_type, chain_type and pdb_type information to the integration file
ff = pd.read_csv(outpath + integration_file, sep='\t')
ff['Modification_position'] = ff.apply(lambda x: x['Modification_position'].replace('[', '').replace(']', '').replace(' ', '') if isinstance(x['Modification_position'], str) else np.nan, axis=1)
ff['modification_site'] = ff.apply(lambda x: modification_type(x.Modification_position, x.coordinates_len), axis=1)
'''
pdb_type = pd.read_csv(outpath + pdb_and_sifts_protein_chain, sep='\t')
pdb_type1 = pdb_type[['pdb_id', 'pdb_type', 'pdb_protein_chain_id']].drop_duplicates()
ff1 = pd.merge(ff, pdb_type1, on=['pdb_id'], how='left')
'''
chain_type = pd.read_csv(outpath + chain_type_file_all, sep='\t', dtype=str)
chain_type.rename(columns={'chain_id_new': 'chain_id'}, inplace=True)
ff2 = pd.merge(ff, chain_type, on=['pdb_id', 'chain_id'], how='left') # ff1(before)
ff2.to_csv(outpath + integration_file_new, sep='\t', index=False)
def add_missing_coordinates_start_end(self):
def getmisindex(a):
str = a
word = '\\?'
b = [m.start() + 1 for m in re.finditer(word, str)]
if b != []:
return b
else:
return ''
def select_UNK(m):# m is the Seqres'content for one line
if len(set(m))==1 and '!' in list(set(m)):
return 'yes'
else:
return 'no'
def mis_or_not(a):
if '?' in a:
return 'yes'
else:
return 'no'
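        # Summarise chain ids and lengths per pdb (by chain type), flag chains made up
        # entirely of unknown residues, then derive missing-residue indices and ranges
        # from the '?' markers left in the Coordinates string.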
outpath = self.CONFIG['OUTPUT_FOLDER']
integration_file_new = MMCIF_unit.CONFIG['ADD_MODIFICATION_FILE']
all_chain_and_length, integration_new_missing_range = MMCIF_unit.CONFIG['ADD_MISSING_FILE']
coordinates_file = MMCIF_unit.CONFIG['ATOM_SEQRES_FILE_LIST'][-1]
ff = pd.read_csv(outpath + integration_file_new, sep='\t', dtype=str)
ff['chain_and_length'] = ff['chain_id'] + ':' + ff['coordinates_len']
        # merge protein chain ids and lengths into one row per pdb
ff1 = ff[ff['chain_type'] == 'protein'][['pdb_id', 'chain_and_length']]
ff1.rename(columns={'chain_and_length': 'protein_chain_and_length'}, inplace=True)
protein = ff1.sort_values(by=['protein_chain_and_length']).groupby(['pdb_id'], as_index=False).agg(
lambda x: ','.join(x))
        # merge DNA chain ids and lengths into one row per pdb
ff2 = ff[ff['chain_type'] == 'DNA'][['pdb_id', 'chain_and_length']]
ff2.rename(columns={'chain_and_length': 'DNA_chain_and_length'}, inplace=True)
DNA = ff2.sort_values(by=['DNA_chain_and_length']).groupby(['pdb_id'], as_index=False).agg(lambda x: ','.join(x))
        # merge RNA chain ids and lengths into one row per pdb
ff3 = ff[ff['chain_type'] == 'RNA'][['pdb_id', 'chain_and_length']]
ff3.rename(columns={'chain_and_length': 'RNA_chain_and_length'}, inplace=True)
RNA = ff3.sort_values(by=['RNA_chain_and_length']).groupby(['pdb_id'], as_index=False).agg(lambda x: ','.join(x))
        # merge mixed (RNA+DNA) chain ids and lengths into one row per pdb
ff4 = ff[ff['chain_type'] == 'RNA+DNA'][['pdb_id', 'chain_and_length']]
ff4.rename(columns={'chain_and_length': 'mix_chain_and_length'}, inplace=True)
mix = ff4.sort_values(by=['mix_chain_and_length']).groupby(['pdb_id'], as_index=False).agg(lambda x: ','.join(x))
        # merge the four categories above
all1 = pd.merge(protein, DNA, on=['pdb_id'], how='left')
all2 = pd.merge(all1, RNA, on=['pdb_id'], how='left')
all3 = pd.merge(all2, mix, on=['pdb_id'], how='left')
'''
pdb_type = pd.read_csv(outpath + pdb_and_sifts_protein_chain, sep='\t', dtype=str)
pdb_type1 = pdb_type[['pdb_id', 'pdb_type']].drop_duplicates()
all4 = pd.merge(all3, pdb_type1, on=['pdb_id'], how='left')
'''
all3.to_csv(outpath + all_chain_and_length, sep='\t', index=False) # all4(before)
        # annotate pdbs whose chains consist entirely of unknown ('!') residues
ff['UNK_ALL_IN_CHAIN'] = ff.apply(lambda x: select_UNK(x.SEQRES), axis=1)
unk_file = ff[ff['UNK_ALL_IN_CHAIN'] == 'yes']
unk_pdb = set(unk_file['pdb_id'])
integration_file1 = ff[ff['pdb_id'].isin(unk_pdb)]
integration_file1['only_contains_unk_in_chain_pdb'] = 'yes'
integration_file2 = ff[~ff['pdb_id'].isin(unk_pdb)]
integration_file2['only_contains_unk_in_chain_pdb'] = 'no'
integration_file3 = pd.concat([integration_file1, integration_file2], axis=0)
        # merge the summary information with the integrated information
statistics = pd.merge(integration_file3, all3, on=['pdb_id'], how='left') # , 'pdb_type'], how='left') # all4(before)
        # add missing-residue information
statistics['mis_index'] = statistics.apply(lambda x: getmisindex(x.Coordinates), axis=1)
statistics['mis_range'] = statistics.apply(lambda x: MMCIF_unit.getInterval(x.mis_index), axis=1)
'''
# statistics['mis_index1'] = statistics['mis_index'].astype(str).str.replace('[','').str.replace('','')
statistics['ismis'] = statistics.apply(lambda x: mis_or_not(x.Coordinates), axis=1)
statistics['mis_each_len'] = statistics.apply(lambda x: geteachmis_len(x.mis_range), axis=1)
statistics['mis_distance_each'] = statistics.apply(lambda x: getdistance_eachmis(x.mis_range), axis=1)
statistics['mis_distance_judgeup5'] = statistics.apply(lambda x: judge_distance5(x.mis_distance_each), axis=1)
statistics['mis_count'] = statistics.apply(lambda x: miscount(x.Coordinates), axis=1)
'''
coordinates_range = pd.read_csv(outpath + coordinates_file, sep='\t', dtype=str)
statistics1 = pd.merge(statistics, coordinates_range, on=['pdb_id', 'chain_id'], how='left')
statistics1.to_csv(outpath + integration_new_missing_range, sep='\t', index=False)
def get_pdb_muta_info(self):
@retry(stop_max_attempt_number=3, wait_fixed=1000)
def getmutation_poly(pdbid):
fw = open(self.CONFIG['OUTPUT_FOLDER'] + MMCIF_unit.CONFIG['PDB_MUTATION_FILE'], 'a')
url = 'https://www.rcsb.org/structure/{}'.format(pdbid)
r = requests.get(url, headers=MMCIF_unit.CONFIG['HEADERS'])
soup = BeautifulSoup(r.text, 'html.parser')
s1 = soup.find(id='MacromoleculeTable')
table = s1.find_all(class_='table-responsive')
table_all = []
for i in table:
table2 = i.find(class_='table table-bordered table-condensed')
table3 = table2.find('tbody')
table4 = table3.find(id=re.compile(r'macromolecule-entityId-'))
table5_chain = table4.find(class_='ellipsisToolTip').text
table5_mutation_0 = table4.find_all('td')[4]
table5_mutation_1 = re.split(r'\xa0', table5_mutation_0.text)[0].split(': ')[1]
table_all.append([pdbid, str(table5_chain), str(table5_mutation_1)])
fw.write('\n'.join([str('\t'.join(x)) for x in table_all]))
fw.write('\n')
fw.close()
time.sleep(2)
# div id="MacromoleculeTable"
# div class="table-responsive"
# table class="table table-bordered table-condensed"
# tbody
# tr id="macromolecule-entityId-3-rowDescription"
        # the mutation count follows the first strong tag in the fifth td
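        # write the header once, then let the worker pool append one row per pdb_id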
fw = open(self.CONFIG['OUTPUT_FOLDER'] + MMCIF_unit.CONFIG['PDB_MUTATION_FILE'], 'w')
fw.write('pdb_id\tchain_id\tmutation_num\n')
fw.close()
        # test
# pdbid = '1a02'
# getmutation_poly(pdbid)
        # read our mutation dataset and collect the current pdb ids
pool = Pool(processes=10)
pool.map(getmutation_poly, self.pdb_list)
def get_resolution(self):
@retry(stop_max_attempt_number=3, wait_fixed=1000)
def getResolution(pdbid):
# output infomation of error
fw=open(self.CONFIG['OUTPUT_FOLDER'] + MMCIF_unit.CONFIG['RESOLUTION_FILE'][0],'a')
# output resolution of pdb
fw2=open(self.CONFIG['OUTPUT_FOLDER'] + MMCIF_unit.CONFIG['RESOLUTION_FILE'][1],'a')
url = 'https://www.rcsb.org/structure/{}'.format(pdbid)
r = requests.get(url, headers=MMCIF_unit.CONFIG['HEADERS'])
soup = BeautifulSoup(r.text, 'html.parser')
try:
s1 = soup.find(id='exp_header_0_diffraction_resolution') # x-ray
s2 = soup.find(id='exp_header_0_em_resolution') # ELECTRON MICROSCOPY
s3 = soup.find(id='exp_header_0_method') # NMR
if s1:
resolution = re.split(r'\xa0',s1.text)[1]
method = 'x-ray'
elif s2:
resolution = re.split(r'\xa0',s2.text)[1]
method = 'electron'
elif s3:
resolution = 'none'
method = 'nmr'
fw2.write(pdbid+'\t'+str(resolution)+'_'+method+'\n')
return [pdbid,str(resolution)+'_'+method]#②
# return pdbid+'\t'+str(resolution)+'_'+method+'\n'①
            except Exception as e:  # none of the three cases above
                fw.write(str(e) + '\n')
fw.close()
fw2.close()
time.sleep(2)
# use my pdblist to running function
fw=open(self.CONFIG['OUTPUT_FOLDER'] + MMCIF_unit.CONFIG['RESOLUTION_FILE'][1],'w')
fw.write('pdb_id\tresolution\n')
fw.close()
# give 10 processes
pool=Pool(processes=10)
pool.map(getResolution, self.pdb_list)
def get_year_info1(self):
@retry(stop_max_attempt_number=3, wait_fixed=1000)
def get_date1(pdbid):
print(pdbid)
fw = open(MMCIF_unit.CONFIG['YEAR_INFO_1_FILE'][0], 'a')
fp = open(MMCIF_unit.CONFIG['YEAR_INFO_1_FILE'][1], 'a')
url = 'https://www.rcsb.org/structure/' + pdbid
r = requests.get(url, headers=MMCIF_unit.CONFIG['HEADERS'])
soup = BeautifulSoup(r.text, 'html.parser')
s1 = soup.findAll("div", {"class": re.compile("col-md-6 col-sm-6 col-xs-12 col-xs-12")})
try:
a = str(s1[-1].text.split(':')[1].split('Type')[0].strip(' '))
b = str(s1[-1].text.split(':')[-2].split('Type')[0].strip(' '))
fw.write('%s\t%s\t%s\n' % (pdbid, a, b))
except:
fp.write(pdbid + '\n')
fw.close()
fp.close()
time.sleep(2)
fw = open(MMCIF_unit.CONFIG['YEAR_INFO_1_FILE'][0], 'w')
fw.write('pdb_id\tinitial_version_time\tnewest_version_time\n')
fw.close()
pool = Pool(processes=20)
pool.map(get_date1, self.pdb_list)
def get_year_info2(self):
@retry(stop_max_attempt_number=3, wait_fixed=1000)
def get_date2(pdbid):
print(pdbid)
fw = open(MMCIF_unit.CONFIG['YEAR_INFO_2_FILE'][0], 'a')
fp = open(MMCIF_unit.CONFIG['YEAR_INFO_2_FILE'][1], 'a')
url = 'https://www.rcsb.org/structure/' + pdbid
r = requests.get(url, headers=MMCIF_unit.CONFIG['HEADERS'])
soup = BeautifulSoup(r.text, 'html.parser')
s1 = soup.findAll("div", {"class": re.compile("col-md-6 col-sm-6 col-xs-12 col-xs-12")})
try:
a = str(s1[-1].text.split(':')[1].split('Type')[0].strip(' '))
b = str(s1[-1].text.split(':')[-2].split('Type')[0].strip(' '))
fw.write('%s\t%s\t%s\n' % (pdbid, a, b))
except:
fp.write(pdbid + '\n')
fw.close()
fp.close()
time.sleep(2)
fw = open(MMCIF_unit.CONFIG['YEAR_INFO_2_FILE'][0], 'w')
fw.write('pdb_id\tinitial_version_time\tnewest_version_time\n')
fw.close()
pool = Pool(processes=5)
data_temp = pd.read_csv(MMCIF_unit.CONFIG['YEAR_INFO_1_FILE'][0], sep='\t', names=['pdb_id'])
pdbid = list(set(data_temp['pdb_id']))
pool.map(get_date2, pdbid)
def get_year_info_all(self):
year1 = pd.read_csv(MMCIF_unit.CONFIG['YEAR_INFO_1_FILE'][0], sep='\t', dtype=str)
year2 = pd.read_csv(MMCIF_unit.CONFIG['YEAR_INFO_2_FILE'][0], sep='\t', dtype=str)
yearall =
|
pd.concat([year1, year2], axis=0)
|
pandas.concat
|
from datetime import datetime
import urllib.request
import pandas as pd
import zipfile
import requests
import plotly
import plotly.graph_objects as go
import folium
from branca.element import Template, MacroElement
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
#Plot 1 and 2 start
## Get EPRACCUR data from NHSD
url = 'https://files.digital.nhs.uk/assets/ods/current/epraccur.zip'
filehandle, _ = urllib.request.urlretrieve(url)
zip_file_object = zipfile.ZipFile(filehandle, 'r')
first_file = zip_file_object.namelist()[0]
file = zip_file_object.open(first_file)
content = file.read()
csv_file = open('assets/data/epraccur_data.csv', 'wb')
csv_file.write(content)
csv_file.close()
header_list = ["Organisation Code", "Name", "National Grouping", "High Level Health Geography", "Address line 1", "Address line 2", "Address line 3",
"Address line 4", "Address line 5","Postcode","Open Date","Close Date","Status Code","Organisation Sub-Type Code","Commissioner","Join Provider/Purchaser Date",
"Left Provider/Purchaser Date","Contact Telephone Number", "Null 1", "Null 2", "Null 3", "Amended Record Indicator", "Null 4", "Provider/Purchaser",
"Null 5", "Prescribing Setting", "Null 6"]
## Get EPRACCUR data from NHSD end
##EPRACCUR data processing
gp_practice_df = pd.read_csv('assets/data/epraccur_data.csv', names=header_list)
gp_practice_df.fillna('', inplace=True)
gp_practice_df['Partial Address'] = gp_practice_df[['Address line 1', 'Address line 2', 'Address line 3', 'Address line 4',]].agg(', '.join, axis=1)
gp_practice_df['Full Address'] = gp_practice_df[['Partial Address', 'Address line 5',]].agg(' '.join, axis=1)
gp_practice_df['Full Address'] = gp_practice_df['Full Address'].str.title()
gp_practice_df['Name'] = gp_practice_df['Name'].str.title()
gp_practice_df_1 = gp_practice_df.drop(columns = {"High Level Health Geography", "Address line 1", "Address line 2", "Address line 3", "Address line 4",
"Address line 5", "Open Date", "Close Date", "Organisation Sub-Type Code", "Commissioner", "Join Provider/Purchaser Date", "Left Provider/Purchaser Date",
"Null 1", "Null 2", "Null 3", "Amended Record Indicator", "Null 4", "Partial Address", "Provider/Purchaser", "Null 5", "Null 6"})
gp_practice_df_2 = gp_practice_df_1[gp_practice_df_1["Status Code"] == "A"]
gp_practice_df_3 = gp_practice_df_2[gp_practice_df_2["Prescribing Setting"] == 4]
gp_practice_df_eng = gp_practice_df_3[gp_practice_df_3["National Grouping"].str.contains("YAC|YAD|YAE|YAF|W00")==False]
gp_practice_df_eng_1 = gp_practice_df_eng.reset_index(drop = True)
gp_practice_df_eng_2 = gp_practice_df_eng_1.copy()
gp_practice_df_eng_3 = gp_practice_df_eng_2.drop( columns = {"Status Code", "Prescribing Setting"})
gp_practice_df_ldn = gp_practice_df_eng_3[gp_practice_df_eng_3["National Grouping"].str.contains("Y56")==True]
gp_practice_df_ldn['Name'] = gp_practice_df_ldn['Name'].str.replace('Gp', 'GP')
gp_practice_df_ldn['Full Address'] = gp_practice_df_ldn['Full Address'].str.replace(' ,', ' ').str.replace(' ', ' ').str.replace('Gp', 'GP').map(lambda x: x.rstrip(', '))
gp_practice_df_ldn_2 = gp_practice_df_ldn[gp_practice_df_ldn["Organisation Code"].str.contains("E85124|Y06487")==False]
gp_practice_df_ldn_3 = gp_practice_df_ldn_2.reset_index(drop = True)
##EPRACCUR data processing end
##Get Patients registered at GP practices data from NHSD
month_year_variable = datetime.now().strftime('%B-%Y').lower()
url = "https://digital.nhs.uk/data-and-information/publications/statistical/patients-registered-at-a-gp-practice/%s" %month_year_variable
response = urllib.request.urlopen(url)
soup = BeautifulSoup(response.read(), "lxml")
data = soup.select_one("a[href*='gp-reg-pat-prac-all.csv']")
if data != None:
csv_url = data['href']
req = requests.get(csv_url)
url_content = req.content
csv_file = open('assets/data/gp_pop_data.csv', 'wb')
csv_file.write(url_content)
csv_file.close()
else:
last_month = datetime.now() - relativedelta(months=1)
last_month_year_variable = last_month.strftime('%B-%Y').lower()
url = "https://digital.nhs.uk/data-and-information/publications/statistical/patients-registered-at-a-gp-practice/%s" %last_month_year_variable
response = urllib.request.urlopen(url)
soup = BeautifulSoup(response.read(), "lxml")
data = soup.select_one("a[href*='gp-reg-pat-prac-all.csv']")
csv_url = data['href']
req = requests.get(csv_url)
url_content = req.content
csv_file = open('assets/data/gp_pop_data.csv', 'wb')
csv_file.write(url_content)
csv_file.close()
gp_pop_df = pd.read_csv('assets/data/gp_pop_data.csv')
gp_pop_df.rename(columns={'CODE': 'Organisation Code', 'NUMBER_OF_PATIENTS': 'Number of patients registered at GP practices in England'}, inplace=True)
gp_pop_df_1 = gp_pop_df.drop(columns = {'PUBLICATION', 'EXTRACT_DATE', 'TYPE', 'CCG_CODE', 'ONS_CCG_CODE', 'SEX', 'AGE', 'POSTCODE'})
gp_pop_df_1 = gp_pop_df_1[gp_pop_df_1 ["Organisation Code"].str.contains("E85124|Y06487")==False]
gp_pop_df_1 = gp_pop_df_1.reset_index(drop = True)
##Get Patients registered at GP practices data from NHSD end
##Merge EPRACCUR and patients registered at GP practices data
gp_pop_ldn = gp_practice_df_ldn_3.join(gp_pop_df_1, rsuffix='Organisation Code')
gp_pop_ldn.rename(columns={'Number of patients registered at GP practices in England': 'Number of patients registered at the GP practice'}, inplace=True)
gp_pop_ldn["Address"] = gp_pop_ldn[["Full Address", "Postcode"]].agg(', '.join, axis=1)
gp_pop_ldn_1 = gp_pop_ldn.drop(columns={'Organisation CodeOrganisation Code', 'National Grouping', 'Full Address'})
gp_pop_ldn_1 = gp_pop_ldn_1[["Organisation Code", "Name", "Address", "Postcode", "Contact Telephone Number", "Number of patients registered at the GP practice"]]
##Merge EPRACCUR and patients registered at GP practices data end
##Visualization Plot 1
x0 = gp_pop_ldn_1['Number of patients registered at the GP practice']
x1 = gp_pop_df_1['Number of patients registered at GP practices in England']
count_england = gp_pop_df_1['Number of patients registered at GP practices in England'].count()
count_london = gp_pop_ldn_1['Number of patients registered at the GP practice'].count()
fig_1 = go.Figure()
fig_1.add_trace(go.Box(x=x0,
boxmean=True,
boxpoints= 'all',
jitter=0.3,
name="London",
marker_color ="#0072CE",
whiskerwidth=0.5,
marker_size=3,
line_width=2))
fig_1.add_trace(go.Box(x=x1,
boxmean=True,
boxpoints= 'all',
jitter=0.3,
name="England",
marker_color = "#003087",
whiskerwidth=0.5,
marker_size=3,
line_width=2))
fig_1.update_layout(
{"plot_bgcolor": "rgba(0, 0, 0, 0)", "paper_bgcolor": "rgba(0, 0, 0, 0)"},
font = dict(family = "Arial", size = 16),
autosize=True,
margin=dict(l=75, r=50, b=160, t=30, pad=4, autoexpand=True), hoverlabel=dict(
font_size=12,
font_family="Arial"
), xaxis=dict(title='Number of patients registered at individual GP practices', zeroline=False))
fig_1.add_annotation(dict(font=dict(family = "Arial",size=15),
x=0.33,
y=-0.40,
showarrow=False,
text="Number of GP practices in England: %s" %count_england,
textangle=0,
xanchor='right',
xref="paper",
yref="paper"))
fig_1.add_annotation(dict(font=dict(family = "Arial",size=15),
x=0.323,
y=-0.46,
showarrow=False,
text="Number of GP practices in London: %s" %count_london,
textangle=0,
xanchor='right',
xref="paper",
yref="paper"))
##Visualization Plot 1 end
## Write out to file (.html) Plot 1
config = {"displayModeBar": False, "displaylogo": False}
plotly_obj = plotly.offline.plot(
fig_1, include_plotlyjs=False, output_type="div", config=config
)
with open("_includes/plotly_obj.html", "w") as file:
file.write(plotly_obj)
## Write out to file (.html) Plot 1 end
#Merge new GP practice data with data from previous timepoint to avoid unnecessary Nominatim API requests
file_name = 'assets/data/gp_pop_ldn_mapped.csv'
old_data = pd.read_csv(file_name, index_col=0)
gp_pop_ldn_1 = gp_pop_ldn_1.merge(old_data[['Organisation Code','loc', 'Point', 'Latitude', 'Longitude', 'Altitude']],on='Organisation Code', how = 'left')
gp_pop_ldn_1.rename(columns={'loc_x': 'loc', 'Point_x': 'Point', 'Latitude_x': 'Latitude', 'Longitude_x': 'Longitude', 'Altitude_x': 'Altitude' }, inplace=True)
#Merge new GP practice data with data from previous timepoint to avoid unnecessary Nominatim API requests end
##Get GP practice coordinates using geopy if new GP practices added to EPRACCUR
geolocator = Nominatim(user_agent="open_access_nhs")
geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
if gp_pop_ldn_1['loc'].count() != gp_pop_ldn_1['Organisation Code'].count():
missing_data = pd.isnull(gp_pop_ldn_1["loc"])
missing_data_df = gp_pop_ldn_1[missing_data]
missing_data_df["loc"] = missing_data_df["Postcode"].apply(geolocator.geocode)
missing_data_df["Point"]= missing_data_df["loc"].apply(lambda loc: tuple(loc.point) if loc else None)
missing_data_df[['Latitude', 'Longitude', 'Altitude']] = pd.DataFrame(missing_data_df['Point'].to_list(), index=missing_data_df.index)
gp_pop_ldn_1 = gp_pop_ldn_1.dropna()
gp_pop_ldn_1 =
|
pd.concat([gp_pop_ldn_1, missing_data_df], ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
from datetime import datetime, timedelta
def get_data():
# Load json data
with open('../data/json_file.json') as data_file:
patients = json.load(data_file)
print("JSON file loaded")
# Features computation
print("Features computation launched...")
visits = []
for patient in patients.values():
for i in range(1, len(patient['visits']) + 1):
visits.append(patient['visits'][str(i)])
n_visits = len(visits)
print("n_visits = %s" % n_visits)
# Features DataFrame with encounter_nums index
encounter_nums = [int(visit.get('encounter_num')) for visit in visits]
X = pd.DataFrame(index=encounter_nums)
# Time vector & censoring indicator
print("Adding labels...", end="")
next_visit = [visit.get('next_visit') for visit in visits]
T = np.array([1e10 if str(t) == 'none' else t for t in next_visit]).astype(
int)
end_dates = pd.to_datetime([visit.get('end_date') for visit in visits])
start_dates = pd.to_datetime([visit.get('start_date') for visit in visits])
C =
|
pd.to_datetime('2016-01-15 00:00:00')
|
pandas.to_datetime
|
import os
import nibabel as nib
from nipype.interfaces.base import TraitedSpec, DynamicTraitedSpec, File, traits, \
BaseInterface, BaseInterfaceInputSpec, isdefined
from nipype.utils.filemanip import split_filename
from nipype.interfaces.io import add_traits
from temporalimage import load as ti_load
from temporalimage import Quantity
import kineticmodel
class KineticModelInputSpec(BaseInterfaceInputSpec):
model = traits.Enum(*kineticmodel.KineticModel.model_values, mandatory=True,
desc='one of: ' + \
', '.join(kineticmodel.KineticModel.model_values))
timeSeriesImgFile = File(exists=True, mandatory=True,
desc='path to dynamic PET image')
frameTimingFile = File(exists=True, mandatory=True,
desc=('csv/sif/json file listing the duration of '
'each time frame in the 4D image'))
refRegionMaskFile = File(exists=True, mandatory=True,
desc='Reference region mask')
startTime = traits.Float(0.0, mandatory=False,
desc=('minute into the time series image at which '
'to start computing the parametric images, inclusive'))
endTime = traits.Float(mandatory=False,
desc=('minute into the time series image at which '
'to stop computing the parametric images, exclusive'))
startActivity = traits.Enum(*kineticmodel.KineticModel.startActivity_values,
mandatory=True,
desc='one of: ' + \
', '.join(kineticmodel.KineticModel.startActivity_values))
weights = traits.Enum(*kineticmodel.KineticModel.weights_values,
desc='one of: ' + \
', '.join(kineticmodel.KineticModel.weights_values),
mandatory=True)
halflife = traits.Float(mandatory=False,
desc=('halflife of the radiotracer, in minutes '
'(required for decay corrected weights)'))
fwhm = traits.Float(mandatory=False,
desc=('Full width at half max (in mm) for Gaussian '
'smoothing (required for SRTM_Zhou2003)'))
class KineticModel(BaseInterface):
"""
Kinetic model applied to voxelwise data.
"""
input_spec = KineticModelInputSpec
output_spec = DynamicTraitedSpec
def _run_interface(self, runtime):
model = self.inputs.model
timeSeriesImgFile = self.inputs.timeSeriesImgFile
refRegionMaskFile = self.inputs.refRegionMaskFile
frameTimingFile = self.inputs.frameTimingFile
endTime = self.inputs.endTime
ti = ti_load(timeSeriesImgFile, frameTimingFile)
if isdefined(self.inputs.startTime):
startTime = Quantity(self.inputs.startTime, 'minute')
else:
startTime = ti.get_startTime()
if isdefined(self.inputs.endTime):
endTime = Quantity(self.inputs.endTime, 'minute')
else:
endTime = ti.get_endTime()
if isdefined(self.inputs.halflife):
halflife = Quantity(self.inputs.halflife, 'minute')
else:
halflife = None
if isdefined(self.inputs.fwhm):
fwhm = self.inputs.fwhm
else:
fwhm = None
_, base, _ = split_filename(timeSeriesImgFile)
ti = ti.extractTime(startTime, endTime)
self.modStartTime = ti.get_startTime().to('min').magnitude
self.modEndTime = ti.get_endTime().to('min').magnitude
class_ = getattr(kineticmodel, model)
results_img = class_.volume_wrapper(ti=ti,
refRegionMaskFile=refRegionMaskFile,
startActivity=self.inputs.startActivity,
weights=self.inputs.weights,
halflife=halflife,
fwhm=fwhm)
for result_name in class_.result_names:
res_img = nib.Nifti1Image(results_img[result_name],
ti.affine, ti.header)
res_fname = base + '_' + '{:02.2f}'.format(self.modEndTime) + \
'min_'+result_name+'.nii.gz'
nib.save(res_img,res_fname)
return runtime
def _add_output_traits(self, base):
class_ = getattr(kineticmodel, self.inputs.model)
return add_traits(base, class_.result_names)
def _outputs(self):
return self._add_output_traits(super(KineticModel, self)._outputs())
def _list_outputs(self):
outputs = self._outputs().get()
model = self.inputs.model
fname = self.inputs.timeSeriesImgFile
_, base, _ = split_filename(fname)
class_ = getattr(kineticmodel, model)
for result_name in class_.result_names:
outputs[result_name] = os.path.abspath(base + '_' + \
'{:02.2f}'.format(self.modEndTime)+'min_'+result_name+'.nii.gz')
return outputs
class KineticModelROIInputSpec(BaseInterfaceInputSpec):
model = traits.Enum(*kineticmodel.KineticModel.model_values, mandatory=True,
desc='one of: ' + \
', '.join(kineticmodel.KineticModel.model_values))
roiTACcsvFile = File(exists=True, mandatory=True,
desc='csv file containing TACs per ROI')
frameTimingFile = File(exists=True, mandatory=True,
desc=('csv/sif/json file listing the duration of '
'each time frame in the 4D image'))
refRegion = traits.String(desc=('Name of reference region, ',
'must be included in roiTACcsvfile'),
mandatory=True)
startTime = traits.Float(0.0, mandatory=False,
desc=('minute into the time series image at which '
'to start computing the parametric images, '
'inclusive'))
endTime = traits.Float(desc=('minute into the time series image at which '
'to stop computing the parametric images, '
'exclusive'),
mandatory=False)
startActivity = traits.Enum(*kineticmodel.KineticModel.startActivity_values,
mandatory=True,
desc='one of: ' + \
', '.join(kineticmodel.KineticModel.startActivity_values))
weights = traits.Enum(*kineticmodel.KineticModel.weights_values,
desc='one of: ' + \
', '.join(kineticmodel.KineticModel.weights_values),
mandatory=True)
halflife = traits.Float(mandatory=False,
desc=('halflife of the radiotracer, in minutes '
'(required for decay corrected weights)'))
class KineticModelROIOutputSpec(TraitedSpec):
csvFile = File(exists=True, desc='csv file')
class KineticModelROI(BaseInterface):
"""
Kinetic model applied to regional data.
"""
input_spec = KineticModelROIInputSpec
output_spec = KineticModelROIOutputSpec
def _run_interface(self, runtime):
import pandas as pd
from temporalimage.t4d import _csvread_frameTiming, \
_sifread_frameTiming, \
_jsonread_frameTiming
model = self.inputs.model
roiTACcsvFile = self.inputs.roiTACcsvFile
refRegion = self.inputs.refRegion
frameTimingFile = self.inputs.frameTimingFile
endTime = self.inputs.endTime
roiTACs = pd.read_csv(roiTACcsvFile)
_, timingfileext = os.path.splitext(frameTimingFile)
if timingfileext=='.csv':
frameStart, frameEnd = _csvread_frameTiming(frameTimingFile)
elif timingfileext=='.sif':
frameStart, frameEnd, _ = _sifread_frameTiming(frameTimingFile)
elif timingfileext=='.json':
frameStart, frameEnd, _ = _jsonread_frameTiming(frameTimingFile)
# Compute the time mid-way for each time frame
t = (frameStart + frameEnd ) / 2
# Compute the duration of each time frame
dt = frameEnd - frameStart
if isdefined(self.inputs.startTime):
startTime = Quantity(self.inputs.startTime, 'minute')
else:
startTime = frameStart[0]
if isdefined(self.inputs.endTime):
endTime = Quantity(self.inputs.endTime, 'minute')
else:
endTime = frameEnd[-1]
if isdefined(self.inputs.halflife):
halflife = Quantity(self.inputs.halflife, 'minute')
else:
halflife = None
_, base, _ = split_filename(roiTACcsvFile)
        # find the first time frame whose frameStart is at or soonest after the specified start time
startIndex = next((i for i,t in enumerate(frameStart) if t>=startTime), len(frameStart)-1)
        # find the first time frame whose frameEnd is soonest after the specified end time
endIndex = next((i for i,t in enumerate(frameEnd) if t>endTime), len(frameStart))
TAC_rownames = roiTACs['ROI'].values
isref = TAC_rownames==refRegion
if isref.sum()!=1:
raise ValueError("Exactly one row should correspond to the reference TAC")
# separate reference region TAC from other TACs
# we add 1 to startIndex and endIndex because the first column in
# roiTACs is ROI names
refTAC = roiTACs.iloc[isref,startIndex+1:endIndex+1].values.flatten()
TAC = roiTACs.iloc[~isref,startIndex+1:endIndex+1].values
TAC_rownames = TAC_rownames[~isref]
# subset time vectors
t = t[startIndex:endIndex]
dt = dt[startIndex:endIndex]
class_ = getattr(kineticmodel, model)
km = class_(t, dt, TAC, refTAC,
startActivity=self.inputs.startActivity,
weights=self.inputs.weights,
halflife=halflife)
km.fit()
results =
|
pd.DataFrame({'ROI': TAC_rownames})
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal, assert_series_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from evalml.pipelines.components import TargetImputer
def test_target_imputer_no_y(X_y_binary):
X, y = X_y_binary
imputer = TargetImputer()
assert imputer.fit_transform(None, None) == (None, None)
imputer = TargetImputer()
imputer.fit(None, None)
assert imputer.transform(None, None) == (None, None)
def test_target_imputer_with_X():
X = pd.DataFrame({"some col": [1, 3, np.nan]})
y = pd.Series([np.nan, 1, 3])
imputer = TargetImputer(impute_strategy="median")
y_expected = pd.Series([2, 1, 3])
X_expected = pd.DataFrame({"some col": [1, 3, np.nan]})
X_t, y_t = imputer.fit_transform(X, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
assert_frame_equal(X_expected, X_t, check_dtype=False)
def test_target_imputer_median():
y = pd.Series([np.nan, 1, 10, 10, 6])
imputer = TargetImputer(impute_strategy="median")
y_expected = pd.Series([8, 1, 10, 10, 6])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_mean():
y = pd.Series([np.nan, 2, 0])
imputer = TargetImputer(impute_strategy="mean")
y_expected = pd.Series([1, 2, 0])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
@pytest.mark.parametrize(
"fill_value, y, y_expected",
[
(None, pd.Series([np.nan, 0, 5]), pd.Series([0, 0, 5])),
(
None,
pd.Series([np.nan, "a", "b"]),
pd.Series(["missing_value", "a", "b"]).astype("category"),
),
(3, pd.Series([np.nan, 0, 5]), pd.Series([3, 0, 5])),
(3, pd.Series([np.nan, "a", "b"]), pd.Series([3, "a", "b"]).astype("category")),
],
)
def test_target_imputer_constant(fill_value, y, y_expected):
imputer = TargetImputer(impute_strategy="constant", fill_value=fill_value)
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_most_frequent():
y = pd.Series([np.nan, "a", "b"])
imputer = TargetImputer(impute_strategy="most_frequent")
y_expected = pd.Series(["a", "a", "b"]).astype("category")
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
y = pd.Series([np.nan, 1, 1, 2])
imputer = TargetImputer(impute_strategy="most_frequent")
y_expected = pd.Series([1, 1, 1, 2])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_col_with_non_numeric_with_numeric_strategy():
y = pd.Series([np.nan, "a", "b"])
imputer = TargetImputer(impute_strategy="mean")
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
imputer.fit_transform(None, y)
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
imputer.fit(None, y)
imputer = TargetImputer(impute_strategy="median")
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
imputer.fit_transform(None, y)
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
imputer.fit(None, y)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_target_imputer_all_bool_return_original(data_type, make_data_type):
y = pd.Series([True, True, False, True, True], dtype=bool)
y = make_data_type(data_type, y)
y_expected = pd.Series([True, True, False, True, True], dtype=bool)
imputer = TargetImputer()
imputer.fit(None, y)
_, y_t = imputer.transform(None, y)
assert_series_equal(y_expected, y_t)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_target_imputer_boolean_dtype(data_type, make_data_type):
y = pd.Series([True, np.nan, False, np.nan, True], dtype="category")
y_expected = pd.Series([True, True, False, True, True], dtype="category")
y = make_data_type(data_type, y)
imputer = TargetImputer()
imputer.fit(None, y)
_, y_t = imputer.transform(None, y)
assert_series_equal(y_expected, y_t)
def test_target_imputer_fit_transform_all_nan_empty():
y = pd.Series([np.nan, np.nan])
imputer = TargetImputer()
imputer.fit(None, y)
with pytest.raises(RuntimeError, match="Transformed data is empty"):
imputer.transform(None, y)
imputer = TargetImputer()
with pytest.raises(RuntimeError, match="Transformed data is empty"):
imputer.fit_transform(None, y)
def test_target_imputer_numpy_input():
y = np.array([np.nan, 0, 2])
imputer = TargetImputer(impute_strategy="mean")
y_expected = np.array([1, 0, 2])
_, y_t = imputer.fit_transform(None, y)
assert np.allclose(y_expected, y_t)
np.testing.assert_almost_equal(y, np.array([np.nan, 0, 2]))
def test_target_imputer_does_not_reset_index():
y = pd.Series(np.arange(10))
y[5] = np.nan
assert y.index.tolist() == list(range(10))
y.drop(0, inplace=True)
pd.testing.assert_series_equal(
pd.Series(
[1, 2, 3, 4, np.nan, 6, 7, 8, 9], dtype=float, index=list(range(1, 10))
),
y,
)
imputer = TargetImputer(impute_strategy="mean")
imputer.fit(None, y=y)
_, y_t = imputer.transform(None, y)
pd.testing.assert_series_equal(
pd.Series([1.0, 2, 3, 4, 5, 6, 7, 8, 9], dtype=float, index=list(range(1, 10))),
y_t,
)
@pytest.mark.parametrize(
"y, y_expected",
[
(pd.Series([1, 0, 5, None]), pd.Series([1, 0, 5, 2])),
(pd.Series([0.1, 0.0, 0.5, None]), pd.Series([0.1, 0.0, 0.5, 0.2])),
],
)
def test_target_imputer_with_none(y, y_expected):
imputer = TargetImputer(impute_strategy="mean")
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
@pytest.mark.parametrize(
"y, y_expected",
[
(
pd.Series(["b", "a", "a", None], dtype="category"),
pd.Series(["b", "a", "a", "a"], dtype="category"),
),
(
pd.Series([True, None, False, True], dtype="category"),
pd.Series([True, True, False, True], dtype="category"),
),
(
pd.Series(["b", "a", "a", None]),
|
pd.Series(["b", "a", "a", "a"], dtype="category")
|
pandas.Series
|
import logging
import itertools
import pandas as pd
import openpathsampling as paths
from openpathsampling.netcdfplus import StorableNamedObject
# from functools import reduce # not built-in for py3
logger = logging.getLogger(__name__)
def _default_state_name(state):
return state.name if state.is_named else str(state)
def _name_unnamed_states(unnamed_states, all_names):
name_index = 0
for state in unnamed_states:
while index_to_string(name_index) in all_names:
name_index += 1
state = state.named(index_to_string(name_index))
name_index += 1
def _or_bar_namer(volumes):
return "|".join([v.name for v in volumes])
# TODO: this should be moved into a general tools module
def listify(obj):
try:
_ = iter(obj)
except TypeError:
obj = [obj]
return obj
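# e.g. listify(3) -> [3], listify([1, 2]) -> [1, 2]; note that strings are already
# iterable, so listify("abc") is returned unchanged rather than wrapped in a list.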
def index_to_string(index):
n_underscore = index // 26
letter_value = index % 26
mystr = "_"*n_underscore + chr(65 + letter_value)
return mystr
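# e.g. index_to_string(0) -> 'A', index_to_string(25) -> 'Z', index_to_string(26) -> '_A';
# every additional multiple of 26 prepends another underscore to the generated name.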
# TODO: this will be removed when we start using the analysis.tis methods
# for the network.rate_matrix
def _set_hist_args(transition, hist_args):
for histname in hist_args.keys():
trans_hist = transition.ensemble_histogram_info[histname]
if trans_hist.hist_args == {}:
trans_hist.hist_args = hist_args[histname]
class TransitionNetwork(StorableNamedObject):
"""
Subclasses of TransitionNetwork are the main way to set up calculations
Attributes
----------
sampling_ensembles
all_ensembles
sampling_transitions
"""
def __init__(self):
super(TransitionNetwork, self).__init__()
# self.transitions = {}
# self.special_ensembles = {}
@property
def sampling_ensembles(self):
"""
Ensembles from the sampling transitions, excluding special ensembles.
"""
return sum([t.ensembles for t in self.sampling_transitions], [])
@property
def analysis_ensembles(self):
"""
Ensembles from the analysis transitions, excluding special ensembles.
"""
return sum([t.ensembles for t in self.transitions.values()], [])
@property
def all_ensembles(self):
"""
All ensembles in the sampling transitions, including special
ensembles.
"""
all_ens = self.sampling_ensembles
for special_dict in self.special_ensembles.values():
all_ens.extend(list(special_dict.keys()))
return all_ens
@property
def sampling_transitions(self):
"""The transitions used in sampling"""
try:
return self._sampling_transitions
except AttributeError:
return None
class GeneralizedTPSNetwork(TransitionNetwork):
"""General class for TPS-based method.
The main differences between fixed-length and flexible-length TPS is a
small change in the ensemble. In implementation, this means that they
use different transition classes, and that they have slightly different
function signatures (fixed-length requires a length argument).
To simplify this, and to make the docstrings readable, we make each
class into a simple subclass of this GeneralizedTPSNetwork, which acts
as an abstract class that manages most of the relevant code.
Parameters
----------
initial_states : list of :class:`.Volume`
acceptable initial states
final_states : list of :class:`.Volume`
acceptable final states
allow_self_transitions : bool
whether self-transitions (A->A) are allowed; default is False
Attributes
----------
TransitionType : :class:`paths.Transition`
Type of transition used here. Sets, for example, fixed or flexible
pathlengths.
"""
TransitionType = NotImplemented
def __init__(self, initial_states, final_states,
allow_self_transitions=False, **kwargs):
# **kwargs gets passed to the transition
super(GeneralizedTPSNetwork, self).__init__()
self.initial_states = listify(initial_states)
self.final_states = listify(final_states)
self.special_ensembles = {None: {}}
all_initial = paths.join_volumes(self.initial_states, _or_bar_namer)
if set(self.initial_states) == set(self.final_states):
all_final = all_initial # so we don't create 2 objs for it
else:
all_final = paths.join_volumes(self.final_states, _or_bar_namer)
self._sampling_transitions, self.transitions = \
self._build_transitions(self.initial_states, self.final_states,
allow_self_transitions, **kwargs)
def _build_transitions(self, initial_states, final_states,
allow_self_transitions, **kwargs):
sampling_transitions = self._build_sampling_transitions(
initial_states, final_states, allow_self_transitions, **kwargs
)
transitions = self._build_analysis_transitions(
initial_states, final_states, allow_self_transitions, **kwargs
)
return sampling_transitions, transitions
def _sampling_transitions_from_pairs(self, state_pairs, **kwargs):
initial, final = state_pairs[0]
sampling_transition = self.TransitionType(initial, final, **kwargs)
for initial, final in state_pairs[1:]:
sampling_transition.add_transition(initial, final)
return [sampling_transition]
def _build_sampling_transitions(self, initial_states, final_states,
allow_self_transitions, **kwargs):
if allow_self_transitions:
initial_to_joined_final = {
initial: paths.join_volumes(final_states, _or_bar_namer)
for initial in initial_states
}
else:
initial_to_joined_final = {
initial: paths.join_volumes([final for final in final_states
if initial != final],
_or_bar_namer)
for initial in initial_states
}
sampling_transitions = self._sampling_transitions_from_pairs(
state_pairs=list(initial_to_joined_final.items()),
**kwargs
)
return sampling_transitions
def _build_analysis_transitions(self, initial_states, final_states,
allow_self_transitions, **kwargs):
transitions = {
(initial, final): self.TransitionType(initial, final, **kwargs)
for (initial, final) in itertools.product(initial_states,
final_states)
if initial != final
}
return transitions
def to_dict(self):
ret_dict = {
'transitions': self.transitions,
'x_sampling_transitions': self._sampling_transitions,
'special_ensembles': self.special_ensembles
}
try:
ret_dict['initial_states'] = self.initial_states
ret_dict['final_states'] = self.final_states
except AttributeError: # pragma: no cover
# DEPRECATED: remove for 2.0
from openpathsampling.deprecations import \
SAVE_RELOAD_OLD_TPS_NETWORK
SAVE_RELOAD_OLD_TPS_NETWORK.warn()
pass # backward compatibility
return ret_dict
@property
def all_states(self):
"""list of all initial and final states"""
return list(set(self.initial_states + self.final_states))
@classmethod
def from_dict(cls, dct):
network = cls.__new__(cls)
super(GeneralizedTPSNetwork, network).__init__()
network._sampling_transitions = dct['x_sampling_transitions']
network.transitions = dct['transitions']
try:
network.initial_states = dct['initial_states']
network.final_states = dct['final_states']
except KeyError: # pragma: no cover
# DEPRECATED: remove for 2.0
pass # backward compatibility
try:
network.special_ensembles = dct['special_ensembles']
except KeyError: # pragma: no cover
# DEPRECATED: remove for 2.0
network.special_ensembles = {None: {}}
# default behavior for backward compatibility
return network
@classmethod
def from_state_pairs(cls, state_pairs, **kwargs):
# TODO: redo this to use the new _sampling_transitions_from_pairs
# method
sampling = []
transitions = {}
initial_states = []
final_states = []
for (initial, final) in state_pairs:
initial_states += [initial]
final_states += [final]
if len(sampling) == 1:
sampling[0].add_transition(initial, final)
elif len(sampling) == 0:
sampling = [cls.TransitionType(initial, final, **kwargs)]
else:
raise RuntimeError("More than one sampling transition for TPS?")
transitions[(initial, final)] = cls.TransitionType(initial,
final,
**kwargs)
dict_result = {
'x_sampling_transitions': sampling,
'transitions': transitions
}
dict_result.update(kwargs)
network = cls.from_dict(dict_result)
network.initial_states = initial_states
network.final_states = final_states
return network
@classmethod
def from_states_all_to_all(cls, states, allow_self_transitions=False,
**kwargs):
return cls(states, states,
allow_self_transitions=allow_self_transitions, **kwargs)
class TPSNetwork(GeneralizedTPSNetwork):
"""
Class for flexible pathlength TPS networks (2-state or multiple state).
"""
TransitionType = paths.TPSTransition
# we implement these functions entirely to fix the signature (super's
# version allow arbitrary kwargs) so the documentation can read them.
def __init__(self, initial_states, final_states,
allow_self_transitions=False):
super(TPSNetwork, self).__init__(initial_states, final_states,
allow_self_transitions)
@classmethod
def from_state_pairs(cls, state_pairs, allow_self_transitions=False):
return super(TPSNetwork, cls).from_state_pairs(state_pairs)
@classmethod
def from_states_all_to_all(cls, states, allow_self_transitions=False):
return super(TPSNetwork, cls).from_states_all_to_all(
states, allow_self_transitions
)
class FixedLengthTPSNetwork(GeneralizedTPSNetwork):
"""
Class for fixed pathlength TPS networks (2-states or multiple states).
"""
TransitionType = paths.FixedLengthTPSTransition
# as with TPSNetwork, we don't really need to add these functions.
# However, without them, we need to explicitly name `length` as
# length=value in these functions. This frees us of that, and gives us
# clearer documentation.
def __init__(self, initial_states, final_states, length,
allow_self_transitions=False):
super(FixedLengthTPSNetwork, self).__init__(
initial_states, final_states,
allow_self_transitions=allow_self_transitions, length=length
)
@classmethod
def from_state_pairs(cls, state_pairs, length):
return super(FixedLengthTPSNetwork, cls).from_state_pairs(
state_pairs, length=length
)
@classmethod
def from_states_all_to_all(cls, states, length,
allow_self_transitions=False):
return super(FixedLengthTPSNetwork, cls).from_states_all_to_all(
states=states,
allow_self_transitions=allow_self_transitions,
length=length
)
class TISNetwork(TransitionNetwork):
# NOTE: this is an abstract class with several properties used by many
# TIS-based networks
# TODO: most of the analysis stuff should end up in here; the bigger
# differences are in setup, not analysis
def __init__(self, trans_info, ms_outers):
self.trans_info = trans_info
try:
ms_outers = list(ms_outers)
except TypeError:
if ms_outers is not None:
ms_outers = [ms_outers]
self.ms_outer_objects = ms_outers
self._sampling_to_analysis = None
self._analysis_to_sampling = None
self._sampling_ensemble_for = None
super(TISNetwork, self).__init__()
@property
def sampling_to_analysis(self):
"""dict mapping sampling transitions to analysis transitions"""
if self._sampling_to_analysis is None:
self._sampling_to_analysis = {
sampling_t: [t for t in self.transitions.values()
if sampling_t.interfaces == t.interfaces]
for sampling_t in self.sampling_transitions
}
return self._sampling_to_analysis
@property
def analysis_to_sampling(self):
"""dict mapping analysis transitions to sampling transitions"""
# in current examples, the result list here is always length 1, but
# perhaps future methods will use multiple sampling transitions
# (different order parameters?) to describe one physical transition
if self._analysis_to_sampling is None:
self._analysis_to_sampling = {
t: [sampling_t for sampling_t in self.sampling_to_analysis
if t in self.sampling_to_analysis[sampling_t]]
for t in self.transitions.values()
}
return self._analysis_to_sampling
@property
def sampling_ensemble_for(self):
"""dict mapping ensembles (incl. sampling) to sampling ensemble"""
if self._sampling_ensemble_for is None:
self._sampling_ensemble_for = {ens: ens
for ens in self.sampling_ensembles}
for ens in self.analysis_ensembles:
analysis_transitions = [t for t in self.transitions.values()
if ens in t.ensembles]
analysis_trans = analysis_transitions[0] # could use any
ens_idx = analysis_trans.ensembles.index(ens)
sampling_trans = self.analysis_to_sampling[analysis_trans]
assert len(sampling_trans) == 1 # this only works in this case
sampling_ens = sampling_trans[0].ensembles[ens_idx]
self._sampling_ensemble_for[ens] = sampling_ens
return self._sampling_ensemble_for
def set_fluxes(self, flux_dictionary):
"""
Parameters
----------
flux_dictionary : dict of 2-tuple to float
keys are in the form (state, interface), and values are the
associated flux
Raises
------
KeyError
If the flux for one of the transitions isn't in the dictionary.
"""
# for now, if you don't have all the fluxes needed, it raises a
# KeyError
for trans in self.transitions.values():
trans._flux = flux_dictionary[(trans.stateA, trans.interfaces[0])]
@property
def minus_ensembles(self):
return list(self.special_ensembles['minus'].keys())
@property
def ms_outers(self):
return list(self.special_ensembles['ms_outer'].keys())
def add_ms_outer_interface(self, ms_outer, transitions, forbidden=None):
relevant = ms_outer.relevant_transitions(transitions)
ensemble = ms_outer.make_ensemble(relevant, forbidden)
# TODO: this should use defaultdict, I think
dct = {ensemble: relevant}
try:
self.special_ensembles['ms_outer'].update(dct)
except KeyError:
self.special_ensembles['ms_outer'] = dct
@property
def all_states(self):
return list(set(self.initial_states + self.final_states))
def get_state(self, snapshot):
"""
Find which core state a snapshot is in, if any
Parameters
----------
snapshot : `openpathsampling.engines.BaseSnapshot`
the snapshot to be tested
Returns
-------
`openpathsampling.Volume`
the volume object defining the state
"""
for state in self.all_states:
if state(snapshot):
return state
return None
class MSTISNetwork(TISNetwork):
"""
Multiple state transition interface sampling network.
The way this works is that it sees two effective sets of transitions.
First, there are sampling transitions. These are based on ensembles
which go to any final state. Second, there are analysis transitions.
These are based on ensembles which go to a specific final state.
Sampling is done using the sampling transitions. Sampling transitions
are stored in the `from_state[state]` dictionary. For MSTIS, the flux
and total crossing probabilities are independent of the final state, and
so the analysis calculates them in the sampling transitions, and copies
the results into the analysis transitions. This way flux and total
crossing probably are only calculated once per interface set.
The conditional transition probability depends on the final state, so it
(and the rate) are calculated using the analysis transitions. The
    analysis transitions are obtained using `.transitions[(stateA, stateB)]`.
"""
def to_dict(self):
ret_dict = {
'from_state': self.from_state,
'states': self.states,
'special_ensembles': self.special_ensembles,
'trans_info': self.trans_info,
'ms_outer_objects': self.ms_outer_objects
}
return ret_dict
@classmethod
def from_dict(cls, dct):
network = cls.__new__(cls)
# replace automatically created attributes with stored ones
network.from_state = dct['from_state']
network.special_ensembles = dct['special_ensembles']
network.states = dct['states']
network.__init__(
trans_info=dct['trans_info'],
ms_outers=dct['ms_outer_objects']
)
return network
def __init__(self, trans_info, ms_outers=None):
"""
Creates MSTISNetwork, including interfaces.
Parameters
----------
trans_info : list of tuple
Details of each state-based ensemble set. 2-tuple in the order
(state, interface_set) where state is a Volume, and
interface_set is an InterfaceSet (with associated
CollectiveVariable)
ms_outers : MSOuterTISInterface or list of MSOuterTISInterface
        multiple state outer interfaces for this network
"""
super(MSTISNetwork, self).__init__(trans_info, ms_outers)
# build sampling transitions
states, interfaces = zip(*trans_info)
self.states = states
if not hasattr(self, "from_state"):
self.special_ensembles = {}
self.from_state = {}
self._build_fromstate_transitions(trans_info)
if self.ms_outer_objects is not None:
for ms_outer in self.ms_outer_objects:
all_transitions = list(self.from_state.values())
self.add_ms_outer_interface(ms_outer, all_transitions)
self._sampling_transitions = list(self.from_state.values())
# by default, we set assign these values to all ensembles
self.hist_args = {}
self.transitions = self._build_analysis_transitions()
@property
def all_states(self):
return self.states
def _build_transitions(self, trans_info, ms_outers, special_ensembles):
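        # NOTE: not invoked by MSTISNetwork.__init__, which builds its transitions via
        # _build_fromstate_transitions / _build_analysis_transitions below; as written,
        # sampling_transitions and transitions are never defined in this scope.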
sampling_ensembles = self._build_sampling_ensembles(trans_info)
return sampling_transitions, transitions, special_ensembles
@staticmethod
def _build_analysis_transition_for_sampling(sampling_transition,
all_states):
local_transitions = {}
state_A = sampling_transition.stateA
other_states = set(all_states) - set([state_A])
str_A = _default_state_name(state_A)
for state_B in other_states:
str_B = _default_state_name(state_B)
trans = paths.TISTransition(
stateA=state_A,
stateB=state_B,
interfaces=sampling_transition.interfaces,
name=str_A + "->" + str_B,
orderparameter=sampling_transition.orderparameter
)
# override created stuff
trans.ensembles = sampling_transition.ensembles
for i in range(len(trans.ensembles)):
trans.ensembles[i].named(trans.name + "[" + str(i) + "]")
trans.minus_ensemble = sampling_transition.minus_ensemble
local_transitions[(state_A, state_B)] = trans
return local_transitions
def _build_analysis_transitions(self):
# set up analysis transitions (not to be saved)
transitions = {}
for from_A in self.from_state.values():
local_transitions = self._build_analysis_transition_for_sampling(
sampling_transition=from_A,
all_states=self.all_states
)
transitions.update(local_transitions)
return transitions
@staticmethod
def build_one_state_sampling_transition(state, interfaces, all_states):
other_states = list(set(all_states) - set([state]))
union_others = paths.join_volumes(
volume_list=other_states,
name="all states except " + str(state.name)
)
this_trans = paths.TISTransition(
stateA=state,
stateB=union_others,
interfaces=interfaces,
name="Out " + state.name,
orderparameter=interfaces.cv
)
return this_trans
def _build_fromstate_transitions(self, trans_info):
"""
Builds the sampling transitions (the self.from_state dictionary).
This also sets self.states (list of states volumes), self.outers
(list of interface volumes making the MS-outer interface), and
self.outer_ensembles (list of TISEnsembles associated with the
        self.outers interfaces). Additionally, it gives default names to
        volumes, interfaces, and transitions.
Parameters
----------
trans_info : list of 2-tuples
See description in __init__.
"""
states, interfaces = zip(*trans_info)
orderparams = [iface_set.cv for iface_set in interfaces]
# NAMING STATES (give default names)
all_states = paths.join_volumes(states).named("all states")
all_names = list(set([s.name for s in states]))
unnamed_states = [s for s in states if not s.is_named]
_name_unnamed_states(unnamed_states, all_names)
# BUILDING ENSEMBLES
self.states = states
for (state, ifaces) in trans_info:
this_trans = self.build_one_state_sampling_transition(
state=state,
interfaces=ifaces,
all_states=states
)
# op = ifaces.cv
# state_index = states.index(state)
# other_states = states[:state_index]+states[state_index+1:]
# other_states = list(set(states) - set([state]))
# union_others = paths.join_volumes(
# volume_list=other_states,
# name="all states except " + str(state.name)
# )
# union_others = paths.volume.join_volumes(other_states)
# union_others.named("all states except " + str(state.name))
# out_others = paths.AllOutXEnsemble(union_others)
# this_trans = paths.TISTransition(
# stateA=state,
# stateB=union_others,
# interfaces=ifaces,
# name="Out " + state.name,
# orderparameter=op
# )
self.from_state[state] = this_trans
this_minus = self.from_state[state].minus_ensemble #& out_others
this_inner = self.from_state[state].ensembles[0]
# TODO: this should use defaultdict, I think
try:
self.special_ensembles['minus'][this_minus] = [this_trans]
except KeyError:
self.special_ensembles['minus'] = {this_minus : [this_trans]}
def __str__(self):
mystr = "Multiple State TIS Network:\n"
for state in self.from_state.keys():
mystr += str(self.from_state[state])
return mystr
def rate_matrix(self, steps, force=False):
"""
Calculate the matrix of all rates.
Parameters
----------
steps : iterable of :class:`.MCStep`
steps to be analyzed
force : bool (False)
if True, cached results are overwritten
Returns
-------
pandas.DataFrame
Rates from row_label to column_label. Diagonal is NaN.
"""
# for each transition in from_state:
# 1. Calculate the flux and the TCP
names = [s.name for s in self.states]
self._rate_matrix =
|
pd.DataFrame(columns=names, index=names)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, csgraph
import scipy
import igraph as ig
import leidenalg
import time
import hnswlib
import matplotlib
import matplotlib.pyplot as plt
import math
import multiprocessing
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy import sparse
from sklearn.metrics.pairwise import euclidean_distances
import umap
import phate
import scanpy as sc
from scipy.sparse.csgraph import connected_components
import pygam as pg
import matplotlib.colors as colors
import matplotlib.cm as cm
from termcolor import colored
import seaborn as sns
from matplotlib.path import get_path_collection_extents
# April 6 2021, upload this version to GitHub/PyPI
def prob_reaching_terminal_state1( terminal_state, all_terminal_states, A, root, pt, num_sim, q,
cumstateChangeHist, cumstateChangeHist_all, seed):
#this function is defined outside the VIA class to enable smooth parallel processing in Windows
np.random.seed(seed)
n_states = A.shape[0]
A = A / (np.max(A))
jj = 0
for row in A:
if np.all(row == 0): A[jj, jj] = 1
jj = jj + 1
P = A / A.sum(axis=1).reshape((n_states, 1))
n_steps = int(2 * n_states) # 2
currentState = root
state = np.zeros((1, n_states))
state[0, currentState] = 1
currentState = root
state = np.zeros((1, n_states))
state[0, currentState] = 1
state_root = state.copy()
neigh_terminal = np.where(A[:, terminal_state] > 0)[0]
non_nn_terminal_state = []
for ts_i in all_terminal_states:
if pt[ts_i] > pt[terminal_state]: non_nn_terminal_state.append(ts_i)
for ts_i in all_terminal_states:
if np.all(neigh_terminal != ts_i): non_nn_terminal_state.append(ts_i)
count_reach_terminal_state = 0
count_r = 0
for i in range(num_sim):
stateChangeHist = np.zeros((n_states, n_states))
stateChangeHist[root, root] = 1
state = state_root
currentState = root
stateHist = state
terminal_state_found = False
x = 0
while (x < n_steps) & (
(terminal_state_found == False)): # & (non_neighbor_terminal_state_reached == False)):
currentRow = np.ma.masked_values((P[currentState]), 0.0)
nextState = simulate_multinomial(currentRow)
# print('next state', nextState)
if nextState == terminal_state:
terminal_state_found = True
count_r = count_r + 1
# Keep track of state changes
stateChangeHist[currentState, nextState] += 1
# Keep track of the state vector itself
state = np.zeros((1, n_states))
state[0, nextState] = 1.0
# Keep track of state history
stateHist = np.append(stateHist, state, axis=0)
currentState = nextState
x = x + 1
if (terminal_state_found == True):
cumstateChangeHist = cumstateChangeHist + np.any(
stateChangeHist > 0, axis=0)
count_reach_terminal_state = count_reach_terminal_state + 1
cumstateChangeHist_all = cumstateChangeHist_all + np.any(
stateChangeHist > 0, axis=0)
# avoid division by zero on states that were never reached (e.g. terminal states that come after the target terminal state)
cumstateChangeHist_all[cumstateChangeHist_all == 0] = 1
# prob_ = cumstateChangeHist / cumstateChangeHist_all
np.set_printoptions(precision=3)
q.append([cumstateChangeHist, cumstateChangeHist_all])
def simulate_markov_sub( A, num_sim, hitting_array, q, root):
n_states = A.shape[0]
P = A / A.sum(axis=1).reshape((n_states, 1))
# hitting_array = np.ones((P.shape[0], 1)) * 1000
hitting_array_temp = np.zeros((P.shape[0], 1)).astype('float64')
n_steps = int(2 * n_states)
hitting_array_final = np.zeros((1, n_states))
currentState = root
state = np.zeros((1, n_states))
state[0, currentState] = 1
state_root = state.copy()
for i in range(num_sim):
dist_list = []
# print(i, 'th simulation in Markov')
# if i % 10 == 0: print(i, 'th simulation in Markov', time.ctime())
state = state_root
currentState = root
stateHist = state
for x in range(n_steps):
currentRow = np.ma.masked_values((P[currentState]), 0.0)
nextState = simulate_multinomial(currentRow)
dist = A[currentState, nextState]
dist = (1 / ((1 + math.exp((dist - 1)))))
dist_list.append(dist)
# print('next state', nextState)
# Keep track of state changes
# stateChangeHist[currentState,nextState]+=1
# Keep track of the state vector itself
state = np.zeros((1, n_states))
state[0, nextState] = 1.0
currentState = nextState
# Keep track of state history
stateHist = np.append(stateHist, state, axis=0)
for state_i in range(P.shape[0]):
first_time_at_statei = np.where(stateHist[:, state_i] == 1)[0]
if len(first_time_at_statei) == 0:
hitting_array_temp[state_i, 0] = n_steps + 1
else:
total_dist = 0
for ff in range(first_time_at_statei[0]):
total_dist = dist_list[ff] + total_dist
hitting_array_temp[state_i, 0] = total_dist # first_time_at_statei[0]
hitting_array = np.append(hitting_array, hitting_array_temp, axis=1)
hitting_array = hitting_array[:, 1:]
q.append(hitting_array)
def getbb(sc, ax):
"""
Function to return a list of bounding boxes in data coordinates for a scatter plot.
Directly taken from https://stackoverflow.com/questions/55005272/
"""
ax.figure.canvas.draw() # need to draw before the transforms are set.
transform = sc.get_transform()
transOffset = sc.get_offset_transform()
offsets = sc._offsets
paths = sc.get_paths()
transforms = sc.get_transforms()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
if isinstance(offsets, np.ma.MaskedArray):
offsets = offsets.filled(np.nan)
bboxes = []
if len(paths) and len(offsets):
if len(paths) < len(offsets):
# for usual scatters you have one path, but several offsets
paths = [paths[0]] * len(offsets)
if len(transforms) < len(offsets):
# often you may have a single scatter size, but several offsets
transforms = [transforms[0]] * len(offsets)
for p, o, t in zip(paths, offsets, transforms):
result = get_path_collection_extents(
transform.frozen(), [p], [t], [o], transOffset.frozen()
)
# bboxes.append(result.inverse_transformed(ax.transData))
bboxes.append(result.transformed(ax.transData.inverted()))
return bboxes
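# Illustrative usage sketch for getbb() (the figure and data below are made up): build a
# scatter plot and retrieve one bounding box per point, expressed in data coordinates.
def _getbb_usage_example():
    fig, ax = plt.subplots()
    pts = np.random.rand(20, 2)
    sc = ax.scatter(pts[:, 0], pts[:, 1], s=40)
    bboxes = getbb(sc, ax)  # one matplotlib Bbox per scatter point
    plt.close(fig)
    return bboxes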
def plot_sc_pb(ax, embedding, prob, ti, cmap_name='viridis'):
# threshold = #np.percentile(prob, 95)#np.mean(prob) + 3 * np.std(prob)
# print('thresold', threshold, np.max(prob))
# prob = [x if x < threshold else threshold for x in prob]
prob = np.sqrt(prob) # scale values to improve visualization of colors
cmap = matplotlib.cm.get_cmap('plasma')
norm = matplotlib.colors.Normalize(vmin=0, vmax=np.max(prob))
prob = np.asarray(prob)
# print('prob plot stats (min, max, mean)', min(prob), max(prob), np.mean(prob))
    # changing the alpha transparency parameter for plotting points
c = cmap(norm(prob))
c = c.reshape(-1, 4)
loc_c = np.where(prob <= 0.3)[0]
c[loc_c, 3] = 0.2
loc_c = np.where((prob > 0.3) & (prob <= 0.5))[0]
c[loc_c, 3] = 0.5
loc_c = np.where((prob > 0.5) & (prob <= 0.7))[0]
c[loc_c, 3] = 0.8
loc_c = np.where((prob > 0.7))[0]
c[loc_c, 3] = 0.8
if embedding.shape[0]>10000: size_point = 10
else: size_point = 30
ax.scatter(embedding[:, 0], embedding[:, 1], c=c, s=size_point, cmap=cmap_name,
edgecolors='none')
ax.set_title('Target: ' + str(ti))
def get_loc_terminal_states(via0, X_input):
    # we need the location of terminal states from the first iteration (Via0) to pass onto the second iteration of Via (Via1)
    # this will allow identification of the terminal cluster in fine-grained Via1 that best captures the terminal state from coarse Via0
    tsi_list = []  # find the single cell which is nearest to the average location of each terminal cluster in PCA space
for tsi in via0.terminal_clusters:
loc_i = np.where(np.asarray(via0.labels) == tsi)[0]
val_pt = [via0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(X_input[loc_i], axis=0)
labelsq, distances = via0.knn_struct.knn_query(temp, k=1)
#print(labelsq[0])
tsi_list.append(labelsq[0][0])
return tsi_list
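# Hedged usage sketch (object names are illustrative): with via0 a fitted coarse VIA run
# and X_input the matrix it was run on,
#     tsi_list = get_loc_terminal_states(via0, X_input)
# returns single-cell indices that can be passed as super_terminal_cells to the
# subsequent fine-grained VIA run.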
def simulate_multinomial(vmultinomial):
# used in Markov Simulations
r = np.random.uniform(0.0, 1.0)
CS = np.cumsum(vmultinomial)
CS = np.insert(CS, 0, 0)
m = (np.where(CS < r))[0]
nextState = m[len(m) - 1]
return nextState
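# Illustrative sketch of the inverse-CDF sampling done by simulate_multinomial(): given
# one row of a row-normalized transition matrix, repeated draws visit each state roughly
# in proportion to its probability. The probabilities below are made up.
def _simulate_multinomial_example(n_draws=1000):
    row = np.array([0.2, 0.5, 0.3])  # outgoing transition probabilities of one state
    counts = np.zeros(len(row))
    for _ in range(n_draws):
        counts[simulate_multinomial(row)] += 1
    return counts / n_draws  # empirically close to [0.2, 0.5, 0.3]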
def sc_loc_ofsuperCluster_PCAspace(p0, p1, idx):
    # ci_list first finds, in the unsampled PCA space, the location of each super-cluster (or its sub-terminal cluster) and of the root
    # Returns the location (index) of the cell nearest to each ci_list entry in the downsampled space
print("dict of terminal state pairs, Super: sub: ", p1.dict_terminal_super_sub_pairs)
p0_labels = np.asarray(p0.labels)
p1_labels = np.asarray(p1.labels)
p1_sc_markov_pt = p1.single_cell_pt_markov
ci_list = []
for ci in range(len(list(set(p0.labels)))):
if ci in p1.revised_super_terminal_clusters: # p0.terminal_clusters:
loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
# loc_i = np.where(p0_labels == ci)[0]
# val_pt = [p1.single_cell_pt_markov[i] for i in loc_i]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 0) # 80
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(p0.data[loc_i], axis=0)
labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
ci_list.append(labelsq[0][0])
elif (ci in p0.root) & (len(p0.root) == 1):
loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
p1_root_label = p1.root[loc_root]
loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
# loc_i = np.where(p0.labels == ci)[0]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 20) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
temp = np.mean(p0.data[loc_i], axis=0)
labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
ci_list.append(labelsq[0][0])
else:
loc_i = np.where(p0_labels == ci)[0]
temp = np.mean(p0.data[loc_i], axis=0)
labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
ci_list.append(labelsq[0][0])
X_ds = p0.data[idx]
p_ds = hnswlib.Index(space='l2', dim=p0.data.shape[1])
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
new_superclust_index_ds = {}
for en_item, item in enumerate(ci_list):
labelsq, distances = p_ds.knn_query(p0.data[item, :], k=1)
# new_superclust_index_ds.append(labelsq[0][0])
new_superclust_index_ds.update({en_item: labelsq[0][0]})
# print('new_superclust_index_ds',new_superclust_index_ds)
return new_superclust_index_ds
def sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx):
    # ci_list: index of the single cell nearest to the average location of each super-cluster, found with an hnsw index built in the embedded space
# idx is the indices of the subsampled elements
print("dict of terminal state pairs, Super: sub: ", p1.dict_terminal_super_sub_pairs)
knn_hnsw = hnswlib.Index(space='l2', dim=embedding.shape[1])
knn_hnsw.init_index(max_elements=embedding.shape[0], ef_construction=200, M=16)
knn_hnsw.add_items(embedding)
knn_hnsw.set_ef(50)
p0_labels = np.asarray(p0.labels)[idx]
p1_labels = np.asarray(p1.labels)[idx]
p1_sc_markov_pt = list(np.asarray(p1.single_cell_pt_markov)[idx])
p0_sc_markov_pt = list(np.asarray(p0.single_cell_pt_markov)[idx])
ci_list = []
ci_dict = {}
for ci in list(set(p0_labels)):
if ci in p1.revised_super_terminal_clusters:
print('ci is in ', ci, 'terminal clus')
loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 80)
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
elif ci in p0.root:
if len(p0.root) > 1:
print('ci is in ', ci, 'Root')
loc_i = np.where(p0_labels == ci)[0]
val_pt = [p0_sc_markov_pt[i] for i in loc_i]
else:
loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
p1_root_label = p1.root[loc_root]
loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 20)
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
else:
print('ci is in ', ci, 'not root , not terminal clus')
loc_i = np.where(p0_labels == ci)[0]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distancesq = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
ci_list.append(labelsq[0][0])
ci_dict[ci] = labelsq[0][0]
print('sc_loc nn clusterp0', ci, np.mean(x), np.mean(y))
print(embedding[labelsq[0][0], 0], embedding[labelsq[0][0], 1])
return knn_hnsw, ci_dict
def make_knn_embeddedspace(embedding):
# knn struct built in the embedded space to be used for drawing the lineage trajectories onto the 2D plot
knn_hnsw = hnswlib.Index(space='l2', dim=embedding.shape[1])
knn_hnsw.init_index(max_elements=embedding.shape[0], ef_construction=200, M=16)
knn_hnsw.add_items(embedding)
knn_hnsw.set_ef(50)
return knn_hnsw
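# Minimal sketch of make_knn_embeddedspace(): build the hnsw index on a 2D embedding and
# query the embedded cell nearest to an arbitrary (x, y) location. Random data for
# illustration only.
def _embedding_knn_example():
    embedding = np.random.rand(500, 2)
    knn_hnsw = make_knn_embeddedspace(embedding)
    labels, distances = knn_hnsw.knn_query(np.array([[0.5, 0.5]]), k=1)
    return labels[0][0]  # index of the embedded cell closest to (0.5, 0.5)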
def draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, G, idx, cmap_name='plasma'):
# G is the igraph knn (low K) used for shortest path in high dim space. no idx needed as it's made on full sample
# knn_hnsw is the knn made in the embedded space used for query to find the nearest point in the downsampled embedding
# that corresponds to the single cells in the full graph
# embedding is the full or downsampled 2D representation of the full dataset. idx is the list of indices of the full dataset for which the embedding is available
# idx is the selected indices of the downsampled samples used in the visualization
y_root = []
x_root = []
root1_list = []
p1_sc_bp = p1.single_cell_bp[idx, :]
p1_labels = np.asarray(p1.labels)[idx]
p1_sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
p1_cc = p1.connected_comp_labels
X_data = p1.data # the input data (sometimes this is the PCA space) with all samples
X_ds = X_data[idx, :]
p_ds = hnswlib.Index(space='l2', dim=X_ds.shape[1])
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
for ii, r_i in enumerate(p1.root):
loc_i = np.where(p1_labels == p1.root[ii])[0]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labels_root, distances_root = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
k=1) # sc location in embedded space of root cell
x_root.append(embedding[labels_root, 0][0])
y_root.append(embedding[labels_root, 1][0])
labelsroot1, distances1 = p1.knn_struct.knn_query(X_ds[labels_root[0][0], :],
k=1) # index of sc-root-cell in the full-PCA space. Need for path
root1_list.append(labelsroot1[0][0])
# single-cell branch probability evolution probability
for i, ti in enumerate(p1.terminal_clusters):
fig, ax = plt.subplots()
plot_sc_pb(ax, embedding, p1_sc_bp[:, i], ti, cmap_name=cmap_name)
loc_i = np.where(p1_labels == ti)[0]
val_pt = [p1_sc_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in
loc_i] # location of sc nearest to average location of terminal clus in the EMBEDDED space
y = [embedding[yi, 1] for yi in loc_i]
labels, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
k=1) # knn_hnsw is knn of embedded space
x_sc = embedding[labels[0], 0] # terminal sc location in the embedded space
y_sc = embedding[labels[0], 1]
start_time = time.time()
labelsq1, distances1 = p1.knn_struct.knn_query(X_ds[labels[0][0], :],
k=1) # find the nearest neighbor in the PCA-space full graph
path = G.get_shortest_paths(root1_list[p1_cc[ti]], to=labelsq1[0][0]) # weights='weight')
# G is the knn of all sc points
path_idx = [] # find the single-cell which is nearest to the average-location of a terminal cluster
# get the nearest-neighbor in this downsampled PCA-space graph. These will make the new path-way points
path = path[0]
# clusters of path
cluster_path = []
for cell_ in path:
cluster_path.append(p1.labels[cell_])
# print(colored('cluster_path', 'green'), colored('terminal state: ', 'blue'), ti, cluster_path)
revised_cluster_path = []
revised_sc_path = []
for enum_i, clus in enumerate(cluster_path):
num_instances_clus = cluster_path.count(clus)
if (clus == cluster_path[0]) | (clus == cluster_path[-1]):
revised_cluster_path.append(clus)
revised_sc_path.append(path[enum_i])
else:
if num_instances_clus > 1:
revised_cluster_path.append(clus)
revised_sc_path.append(path[enum_i])
print(colored('Cluster-path', 'green'), colored('terminal state: ', 'blue'), ti, revised_cluster_path)
path = revised_sc_path # (based on hi-dim PCA KNN)
#find the single-cells in the original high-dimensional path that correspond to the closest single-cells in the downsampled space
for pii in path:
labelsq, distances = p_ds.knn_query(X_data[pii, :], k=1)
path_idx.append(labelsq[0][0])
## for the cells in the downsampled path, find the corresponding clusters
downsampled_cluster_idx = []
for clus_ in path_idx:
downsampled_cluster_idx.append(p1_labels[clus_])
print(colored('Cluster-path in downsampled embedded space (if visualization is performed on downsampled input)', 'green'), colored('terminal state: ', 'blue'), ti,downsampled_cluster_idx)
path = path_idx
n_orange = len(path)
orange_m = np.zeros((n_orange, 3))
for enum_point, point in enumerate(path):
# ax.text(embedding[point, 0], embedding[point, 1], 'D ' + str(enum_point), color='blue', fontsize=8)
orange_m[enum_point, 0] = embedding[point, 0]
orange_m[enum_point, 1] = embedding[point, 1]
orange_m[enum_point, 2] = p1_sc_pt_markov[point]
from sklearn.neighbors import NearestNeighbors
k_orange = min(3, n_orange) # increasing can smoothen in simple trajectories (Toy)
nbrs = NearestNeighbors(n_neighbors=k_orange, algorithm='ball_tree').fit(
orange_m[:, 0:]) # make a knn in low-dim space using points of path in embedded space
distances, indices = nbrs.kneighbors(orange_m[:, 0:])
row_list = []
col_list = []
dist_list = []
for i_or in range(n_orange):
for j_or in range(1, k_orange):
row_list.append(i_or)
col_list.append(indices[i_or, j_or])
dist_list.append(distances[i_or, j_or])
print('target number ' + str(ti))
orange_adjacency_knn = csr_matrix((np.array(dist_list), (np.array(row_list), np.array(col_list))),
shape=(n_orange, n_orange))
n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False, return_labels=True)
for enum_point, point in enumerate(path): # [0]):
orange_m[enum_point, 2] = p1_sc_pt_markov[point] * p1_sc_pt_markov[
point] * 2
while n_mst > 1:
comp_root = comp_labels_mst[0]
min_ed = 9999999
loc_comp_i = np.where(comp_labels_mst == comp_root)[0]
loc_comp_noti = np.where(comp_labels_mst != comp_root)[0]
orange_pt_val = [orange_m[cc, 2] for cc in loc_comp_i]
loc_comp_i_revised = [loc_comp_i[cc] for cc in range(len(orange_pt_val)) if
orange_pt_val[cc] >= np.percentile(orange_pt_val, 70)]
if len(loc_comp_i_revised)<3: loc_comp_i_revised = loc_comp_i
#print('len loc_compi and loc_notcompi',len(loc_comp_i_revised), len(loc_comp_noti))
for nn_i in loc_comp_i_revised:
ed = euclidean_distances(orange_m[nn_i, :].reshape(1, -1), orange_m[loc_comp_noti])
if np.min(ed) < min_ed:
ed_where_min = np.where(ed[0] == np.min(ed))[0][0]
min_ed = np.min(ed)
ed_loc_end = loc_comp_noti[ed_where_min]
ed_loc_start = nn_i
#print('Connecting components before sc-bp-GAM: the closest pair of points', ed_loc_start, ed_loc_end)
orange_adjacency_knn[ed_loc_start, ed_loc_end] = min_ed
n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False,
return_labels=True)
if n_mst == 1: # if no disconnected components in the graph #now draw the shortest path along the knn of embedding points along the path
(orange_sources, orange_targets) = orange_adjacency_knn.nonzero()
orange_edgelist = list(zip(orange_sources.tolist(), orange_targets.tolist()))
G_orange = ig.Graph(n=orange_adjacency_knn.shape[0], edges=orange_edgelist,
edge_attrs={'weight': orange_adjacency_knn.data.tolist()}, )
path_orange = G_orange.get_shortest_paths(0, to=orange_adjacency_knn.shape[0] - 1, weights='weight')[0]
len_path_orange = len(path_orange)
for path_i in range(len_path_orange - 1):
path_x_start = orange_m[path_orange[path_i], 0]
path_x_end = orange_m[path_orange[path_i + 1], 0]
orange_x = [orange_m[path_orange[path_i], 0], orange_m[path_orange[path_i + 1], 0]]
orange_minx = min(orange_x)
orange_maxx = max(orange_x)
orange_y = [orange_m[path_orange[path_i], 1], orange_m[path_orange[path_i + 1], 1]]
orange_miny = min(orange_y)
orange_maxy = max(orange_y)
orange_embedding_sub = embedding[
((embedding[:, 0] <= orange_maxx) & (embedding[:, 0] >= orange_minx)) & (
(embedding[:, 1] <= orange_maxy) & ((embedding[:, 1] >= orange_miny)))]
if (orange_maxy - orange_miny > 5) | (orange_maxx - orange_minx > 5):
orange_n_reps = 150
else:
orange_n_reps = 100
or_reps = np.repeat(np.array([[orange_x[0], orange_y[0]]]), orange_n_reps, axis=0)
orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
or_reps = np.repeat(np.array([[orange_x[1], orange_y[1]]]), orange_n_reps, axis=0)
orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
orangeGam = pg.LinearGAM(n_splines=8, spline_order=3, lam=10, verbose=False).fit(orange_embedding_sub[:, 0],
orange_embedding_sub[:, 1])
nx_spacing = 100
orange_GAM_xval = np.linspace(orange_minx, orange_maxx, nx_spacing * 2)
yg_orange = orangeGam.predict(X=orange_GAM_xval)
ax.plot(orange_GAM_xval, yg_orange, color='dimgrey', linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
cur_x1 = orange_GAM_xval[-1]
cur_y1 = yg_orange[-1]
cur_x2 = orange_GAM_xval[0]
cur_y2 = yg_orange[0]
if path_i >= 1:
for mmddi in range(2):
xy11 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
np.array([prev_x1, prev_y1]).reshape(1, -1))
xy12 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
np.array([prev_x2, prev_y2]).reshape(1, -1))
xy21 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
np.array([prev_x1, prev_y1]).reshape(1, -1))
xy22 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
np.array([prev_x2, prev_y2]).reshape(1, -1))
mmdd_temp_array = np.asarray([xy11, xy12, xy21, xy22])
mmdd_loc = np.where(mmdd_temp_array == np.min(mmdd_temp_array))[0][0]
if mmdd_loc == 0:
ax.plot([cur_x1, prev_x1], [cur_y1, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 1:
ax.plot([cur_x1, prev_x2], [cur_y1, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 2:
ax.plot([cur_x2, prev_x1], [cur_y2, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 3:
ax.plot([cur_x2, prev_x2], [cur_y2, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if (path_x_start > path_x_end): direction_arrow_orange = -1 # going LEFT
if (path_x_start <= path_x_end): direction_arrow_orange = 1 # going RIGHT
if (abs(
path_x_start - path_x_end) > 2.5): # |(abs(orange_m[path_i, 2] - orange_m[path_i + 1, 1]) > 1)):
if (direction_arrow_orange == -1): # & :
ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
orange_GAM_xval[nx_spacing - 1] - orange_GAM_xval[nx_spacing],
yg_orange[nx_spacing - 1] - yg_orange[nx_spacing], shape='full', lw=0,
length_includes_head=True,
head_width=0.5, color='dimgray', zorder=3)
if (direction_arrow_orange == 1): # &(abs(orange_m[path_i,0]-orange_m[path_i+1,0])>0.5):
ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
orange_GAM_xval[nx_spacing + 1] - orange_GAM_xval[nx_spacing],
yg_orange[nx_spacing + 1] - yg_orange[nx_spacing], shape='full', lw=0,
length_includes_head=True,
head_width=0.5,
color='dimgray', zorder=3)
prev_x1 = cur_x1
prev_y1 = cur_y1
prev_x2 = cur_x2
prev_y2 = cur_y2
ax.scatter(x_sc, y_sc, color='pink', zorder=3, label=str(ti), s=22)
return
def get_biased_weights(edgelist, weights, pt, round_no=1):
# small nu means less biasing (0.5 is quite mild)
# larger nu (in our case 1/nu) means more aggressive biasing https://en.wikipedia.org/wiki/Generalised_logistic_function
bias_weight = []
if round_no == 1: # using the pseudotime calculated from lazy-jumping walk
b = 1
    else:  # using the refined MCMC pseudotimes before calculating lineage likelihood paths
b = 20
K = 1
c = 0
C = 1
nu = 1
high_weights_th = np.mean(weights)
high_pt_th = np.percentile(np.asarray(pt), 80)
loc_high_weights = np.where(weights > high_weights_th)[0]
loc_high_pt = np.where(np.asarray(pt) > high_pt_th)[0]
for i in loc_high_weights:
start = edgelist[i][0]
end = edgelist[i][1]
if (start in loc_high_pt) | (end in loc_high_pt):
weights[i] = 0.5 * np.mean(weights)
upper_lim = np.percentile(weights, 90) # 80
lower_lim = np.percentile(weights, 10) # 20
weights = [i if i <= upper_lim else upper_lim for i in weights]
weights = [i if i >= lower_lim else lower_lim for i in weights]
for i, (start, end) in enumerate(edgelist):
Pt_a = pt[start]
Pt_b = pt[end]
P_ab = weights[i]
t_ab = Pt_a - Pt_b
Bias_ab = K / ((C + math.exp(b * (t_ab + c)))) ** nu
new_weight = (Bias_ab * P_ab)
bias_weight.append(new_weight)
return list(bias_weight)
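# Worked sketch of the generalised-logistic bias applied by get_biased_weights(): with
# K = C = nu = 1 and c = 0, Bias_ab = 1 / (1 + exp(b * (pt[start] - pt[end]))), so an
# edge pointing forward in pseudotime keeps most of its weight while the reverse edge is
# strongly damped. The toy pseudotimes and weights below are illustrative only.
def _edge_bias_example():
    edgelist = [(0, 1), (1, 0)]      # a forward edge and its reverse
    weights = np.array([1.0, 1.0])
    pt = [0.1, 0.9]                  # node 0 is "earlier" in pseudotime than node 1
    biased = get_biased_weights(edgelist, weights, pt, round_no=2)
    return biased                    # first entry stays near 1, second is close to 0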
def expected_num_steps(start_i, N):
n_t = N.shape[0]
N_steps = np.dot(N, np.ones(n_t))
n_steps_i = N_steps[start_i]
return n_steps_i
def absorption_probability(N, R, absorption_state_j):
M = np.dot(N, R)
vec_prob_end_in_j = M[:, absorption_state_j]
return M, vec_prob_end_in_j
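# Hedged sketch of how expected_num_steps() and absorption_probability() are meant to be
# used: for an absorbing Markov chain with transient-to-transient block Q and
# transient-to-absorbing block R, the fundamental matrix is N = inv(I - Q). The tiny
# chain below (states 0 and 1 transient, one absorbing state) is illustrative only.
def _absorbing_chain_example():
    Q = np.array([[0.0, 0.5],
                  [0.4, 0.0]])       # transient -> transient transition probabilities
    R = np.array([[0.5],
                  [0.6]])            # transient -> absorbing transition probabilities
    N = np.linalg.inv(np.eye(Q.shape[0]) - Q)         # fundamental matrix
    steps_from_0 = expected_num_steps(0, N)           # expected steps before absorption
    M, prob_absorb = absorption_probability(N, R, 0)  # probability of ending in absorbing state 0
    return steps_from_0, prob_absorb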
def draw_trajectory_gams(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
alpha_teleport,
projected_sc_pt, true_label, knn, ncomp, final_super_terminal, sub_terminal_clusters,
title_str="hitting times", super_root=[0], draw_all_curves = True,arrow_width_scale_factor=15):
X_dimred=X_dimred*1./np.max(X_dimred, axis=0)
x = X_dimred[:, 0]
y = X_dimred[:, 1]
max_x = np.percentile(x, 90)
noise0 = max_x / 1000
df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
'projected_sc_pt': projected_sc_pt},
columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
df_mean = df.groupby('cluster', as_index=False).mean()
sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(0).astype(
int)
df_super_mean = df.groupby('super_cluster', as_index=False).mean()
pt = df_super_mean['projected_sc_pt'].values
pt_int = [int(i) for i in pt]
pt_str = [str(i) for i in pt_int]
pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True,figsize=[20, 10])
num_true_group = len(set(true_label))
num_cluster = len(set(super_cluster_labels))
line = np.linspace(0, 1, num_true_group)
for color, group in zip(line, sorted(set(true_label))):
where = np.where(np.array(true_label) == group)[0]
ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4),
alpha=0.5, s=10) # 0.5 and 4
ax1.legend(fontsize=6)
ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
G_orange = ig.Graph(n=num_cluster, edges=super_edgelist)
ll_ = [] #this can be activated if you intend to simplify the curves
for fst_i in final_super_terminal:
#print('draw traj gams:', G_orange.get_shortest_paths(super_root[0], to=fst_i))
path_orange = G_orange.get_shortest_paths(super_root[0], to=fst_i)[0]
len_path_orange = len(path_orange)
for enum_edge, edge_fst in enumerate(path_orange):
if enum_edge < (len_path_orange - 1):
ll_.append((edge_fst, path_orange[enum_edge + 1]))
if draw_all_curves == True: edges_to_draw = super_edgelist #default is drawing all super-edges
else: edges_to_draw = list(set(ll_))
for e_i, (start, end) in enumerate(edges_to_draw): # enumerate(list(set(ll_))):# : use the ll_ if you want to simplify the curves
#for e_i, (start, end) in enumerate(list(set(ll_))):# : use the ll_ if you want to simplify the curves
if pt[start] >= pt[end]:
temp = end
end = start
start = temp
x_i_start = df[df['super_cluster'] == start]['x'].values
y_i_start = df[df['super_cluster'] == start]['y'].values
x_i_end = df[df['super_cluster'] == end]['x'].values
y_i_end = df[df['super_cluster'] == end]['y'].values
direction_arrow = 1
super_start_x = X_dimred[sc_supercluster_nn[start], 0]
super_end_x = X_dimred[sc_supercluster_nn[end], 0]
super_start_y = X_dimred[sc_supercluster_nn[start], 1]
super_end_y = X_dimred[sc_supercluster_nn[end], 1]
if super_start_x > super_end_x: direction_arrow = -1
ext_maxx = False
minx = min(super_start_x, super_end_x)
maxx = max(super_start_x, super_end_x)
miny = min(super_start_y, super_end_y)
maxy = max(super_start_y, super_end_y)
x_val = np.concatenate([x_i_start, x_i_end])
y_val = np.concatenate([y_i_start, y_i_end])
idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[
0]
idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[
0]
idx_keep = np.intersect1d(idy_keep, idx_keep)
x_val = x_val[idx_keep]
y_val = y_val[idx_keep]
super_mid_x = (super_start_x + super_end_x) / 2
super_mid_y = (super_start_y + super_end_y) / 2
from scipy.spatial import distance
very_straight = False
if abs(minx - maxx) <= 1:
very_straight = True
straight_level = 10
noise = noise0
x_super = np.array(
[super_start_x, super_end_x, super_start_x, super_end_x, super_start_x, super_end_x, super_start_x,
super_end_x, super_start_x + noise, super_end_x + noise,
super_start_x - noise, super_end_x - noise, super_mid_x])
y_super = np.array(
[super_start_y, super_end_y, super_start_y, super_end_y, super_start_y, super_end_y, super_start_y,
super_end_y, super_start_y + noise, super_end_y + noise,
super_start_y - noise, super_end_y - noise, super_mid_y])
else:
straight_level = 3
noise = noise0
x_super = np.array(
[super_start_x, super_end_x, super_start_x, super_end_x, super_start_x, super_end_x, super_start_x,
super_end_x, super_start_x + noise, super_end_x + noise,
super_start_x - noise, super_end_x - noise])
y_super = np.array(
[super_start_y, super_end_y, super_start_y, super_end_y, super_start_y, super_end_y, super_start_y,
super_end_y, super_start_y + noise, super_end_y + noise,
super_start_y - noise, super_end_y - noise])
for i in range(straight_level): # DO THE SAME FOR A MIDPOINT TOO
y_super = np.concatenate([y_super, y_super])
x_super = np.concatenate([x_super, x_super])
list_selected_clus = list(zip(x_val, y_val))
if (len(list_selected_clus) >= 1) & (very_straight == True):
dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
if len(list_selected_clus) >= 2:
k = 2
else:
k = 1
midpoint_loc = dist[0].argsort()[:k]
midpoint_xy = []
for i in range(k):
midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
noise = noise0 * 2
if k == 1:
mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
0] - noise])
mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
1] - noise])
if k == 2:
mid_x = np.array(
[midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
mid_y = np.array(
[midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
for i in range(3):
mid_x = np.concatenate([mid_x, mid_x])
mid_y = np.concatenate([mid_y, mid_y])
x_super = np.concatenate([x_super, mid_x])
y_super = np.concatenate([y_super, mid_y])
x_val = np.concatenate([x_val, x_super])
y_val = np.concatenate([y_val, y_super])
x_val = x_val.reshape((len(x_val), -1))
y_val = y_val.reshape((len(y_val), -1))
xp = np.linspace(minx, maxx, 500)
gam50 = pg.LinearGAM(n_splines=4, spline_order=3, lam=10).gridsearch(x_val, y_val)
XX = gam50.generate_X_grid(term=0, n=500)
preds = gam50.predict(XX)
if ext_maxx == False:
idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]
else:
idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]
ax2.plot(XX, preds, linewidth=3.5, c='#323538')#1.5
mean_temp = np.mean(xp[idx_keep])
closest_val = xp[idx_keep][0]
closest_loc = idx_keep[0]
for i, xp_val in enumerate(xp[idx_keep]):
if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
closest_val = xp_val
closest_loc = idx_keep[i]
step = 1
head_width = noise * arrow_width_scale_factor #arrow_width needs to be adjusted sometimes # 40#30 ##0.2 #0.05 for mESC #0.00001 (#for 2MORGAN and others) # 0.5#1
if direction_arrow == 1:
ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc + step] - xp[closest_loc],
preds[closest_loc + step] - preds[closest_loc], shape='full', lw=0, length_includes_head=False,
head_width=head_width, color='#323538') # , head_starts_at_zero = direction_arrow )
else:
ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc - step] - xp[closest_loc],
preds[closest_loc - step] - preds[closest_loc], shape='full', lw=0, length_includes_head=False,
head_width=head_width, color='#323538') # dimgray head_width=head_width
c_edge = []
width_edge = []
pen_color = []
super_cluster_label = []
terminal_count_ = 0
dot_size = []
for i in sc_supercluster_nn:
if i in final_super_terminal:
print('super cluster', i, 'is a super terminal with sub_terminal cluster',
sub_terminal_clusters[terminal_count_])
width_edge.append(2)
c_edge.append('yellow') # ('yellow')
pen_color.append('black')
super_cluster_label.append('TS' + str(sub_terminal_clusters[terminal_count_])) # +'('+str(i)+')')
dot_size.append(60)
terminal_count_ = terminal_count_ + 1
else:
width_edge.append(0)
c_edge.append('black')
pen_color.append('red')
super_cluster_label.append(str(' ')) # i
dot_size.append(00) # 20
ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.8, s=12) # alpha=0.6, s=10
count_ = 0
loci = [sc_supercluster_nn[key] for key in sc_supercluster_nn]
for i, c, w, pc, dsz in zip(loci, c_edge, width_edge, pen_color, dot_size): # sc_supercluster_nn
ax2.scatter(X_dimred[i, 0], X_dimred[i, 1], c='black', s=dsz, edgecolors=c, linewidth=w)
count_ = count_ + 1
plt.title(title_str)
return
def csr_mst(adjacency_matrix):
# return minimum spanning tree from adjacency matrix (csr)
Tcsr = adjacency_matrix.copy()
n_components_mst, comp_labels_mst = connected_components(csgraph=Tcsr, directed=False, return_labels=True)
# print('number of components before mst', n_components_mst)
# print('len Tcsr data', len(Tcsr.data))
Tcsr.data = -1 * Tcsr.data
Tcsr.data = Tcsr.data - np.min(Tcsr.data)
Tcsr.data = Tcsr.data + 1
# print('len Tcsr data', len(Tcsr.data))
Tcsr = minimum_spanning_tree(Tcsr) # adjacency_matrix)
n_components_mst, comp_labels_mst = connected_components(csgraph=Tcsr, directed=False, return_labels=True)
# print('number of components after mst', n_components_mst)
Tcsr = (Tcsr + Tcsr.T) * 0.5 # make symmetric
# print('number of components after symmetric mst', n_components_mst)
# print('len Tcsr data', len(Tcsr.data))
return Tcsr
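# Minimal sketch of csr_mst(): the edge weights here act as similarities (larger means
# stronger), so they are negated and shifted before scipy's minimum_spanning_tree is
# applied, making the returned tree keep the strongest links. Toy graph only.
def _csr_mst_example():
    row = np.array([0, 0, 1, 2])
    col = np.array([1, 2, 2, 3])
    w = np.array([5.0, 1.0, 4.0, 2.0])   # similarity-style weights
    adj = csr_matrix((w, (row, col)), shape=(4, 4))
    return csr_mst(adj)                  # symmetric csr holding the spanning edges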
def connect_all_components(MSTcsr, cluster_graph_csr, adjacency_matrix):
# connect forest of MSTs (csr)
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
while n_components > 1:
sub_td = MSTcsr[comp_labels == 0, :][:, comp_labels != 0]
# print('minimum value of link connecting components', np.min(sub_td.data))
locxy = scipy.sparse.find(MSTcsr == np.min(sub_td.data))
for i in range(len(locxy[0])):
if (comp_labels[locxy[0][i]] == 0) & (comp_labels[locxy[1][i]] != 0):
x = locxy[0][i]
y = locxy[1][i]
minval = adjacency_matrix[x, y]
cluster_graph_csr[x, y] = minval
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
#print('number of connected components after reconnecting ', n_components)
return cluster_graph_csr
def pruning_clustergraph(adjacency_matrix, global_pruning_std=1, max_outgoing=30, preserve_disconnected=True,
preserve_disconnected_after_pruning=False):
    # neighbors in the adjacency matrix (neighbor-matrix) are not listed in any order of proximity
    # a larger pruning_std factor means less pruning
    # the mst is only used to reconnect components that become disconnected due to pruning
    # print('global pruning std', global_pruning_std, 'max outgoing', max_outgoing)
from scipy.sparse.csgraph import minimum_spanning_tree
Tcsr = csr_mst(adjacency_matrix)
initial_links_n = len(adjacency_matrix.data)
n_components_0, comp_labels_0 = connected_components(csgraph=adjacency_matrix, directed=False, return_labels=True)
print('number of components before pruning', n_components_0)#, comp_labels_0)
adjacency_matrix = scipy.sparse.csr_matrix.todense(adjacency_matrix)
row_list = []
col_list = []
weight_list = []
n_cells = adjacency_matrix.shape[0]
rowi = 0
for i in range(adjacency_matrix.shape[0]):
row = np.asarray(adjacency_matrix[i, :]).flatten()
n_nonz = np.sum(row > 0)
n_nonz = min(n_nonz, max_outgoing)
to_keep_index = np.argsort(row)[::-1][0:n_nonz] # np.where(row>np.mean(row))[0]#
# print('to keep', to_keep_index)
updated_nn_weights = list(row[to_keep_index])
for ik in range(len(to_keep_index)):
row_list.append(rowi)
col_list.append(to_keep_index[ik])
dist = updated_nn_weights[ik]
weight_list.append(dist)
rowi = rowi + 1
final_links_n = len(weight_list)
cluster_graph_csr = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
sources, targets = cluster_graph_csr.nonzero()
mask = np.zeros(len(sources), dtype=bool)
cluster_graph_csr.data = cluster_graph_csr.data / (np.std(cluster_graph_csr.data)) # normalize
threshold_global = np.mean(cluster_graph_csr.data) - global_pruning_std * np.std(cluster_graph_csr.data)
# print('threshold global', threshold_global, ' mean:', np.mean(cluster_graph_csr.data))
mask |= (cluster_graph_csr.data < (threshold_global)) # smaller Jaccard weight means weaker edge
cluster_graph_csr.data[mask] = 0
cluster_graph_csr.eliminate_zeros()
# print('shape of cluster graph', cluster_graph_csr.shape)
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
#print('number of connected components after pruning', n_components)
#print('number of connected components after pruning', comp_labels)
# print('n_components_0', n_components_0)
n_components_preserve = n_components_0
if preserve_disconnected_after_pruning == True: n_components_preserve = n_components
# if (n_components > n_components_0): print('n_components > n_components_0',n_components ,'is bigger than', n_components_0)
if (preserve_disconnected == True) & (n_components > n_components_0): # preserve initial disconnected components
Td = Tcsr.todense()
Td[Td == 0] = 999.999
n_components_ = n_components
while n_components_ > n_components_preserve:
for i in range(n_components_preserve):
loc_x = np.where(comp_labels_0 == i)[0]
len_i = len(set(comp_labels[loc_x]))
while len_i > 1:
s = list(set(comp_labels[loc_x]))
loc_notxx = np.intersect1d(loc_x, np.where((comp_labels != s[0]))[0])
loc_xx = np.intersect1d(loc_x, np.where((comp_labels == s[0]))[0])
sub_td = Td[loc_xx, :][:, loc_notxx]
locxy = np.where(Td == np.min(sub_td))
for i in range(len(locxy[0])):
if (comp_labels[locxy[0][i]] != comp_labels[locxy[1][i]]):
x = locxy[0][i]
y = locxy[1][i]
minval = adjacency_matrix[x, y]
cluster_graph_csr[x, y] = minval
n_components_, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False,
return_labels=True)
loc_x = np.where(comp_labels_0 == i)[0]
len_i = len(set(comp_labels[loc_x]))
        print('number of connected components after reconnecting ', n_components_)
if (n_components > 1) & (preserve_disconnected == False):
cluster_graph_csr = connect_all_components(Tcsr, cluster_graph_csr, adjacency_matrix)
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
sources, targets = cluster_graph_csr.nonzero()
edgelist = list(zip(sources, targets))
edgeweights = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))
trimmed_n = (initial_links_n - final_links_n) * 100 / initial_links_n
trimmed_n_glob = (initial_links_n - len(edgeweights))*100 / initial_links_n
if global_pruning_std < 0.5:
print("percentage links trimmed from local pruning relative to start {:.1f}".format(trimmed_n))
print("percentage links trimmed from global pruning relative to start {:.1f}".format(trimmed_n_glob))
return edgeweights, edgelist, comp_labels
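# Minimal sketch of pruning_clustergraph(): feed a csr similarity graph and receive the
# pruned edge weights, edge list and connected-component labels. The random graph and
# parameter values below are illustrative only.
def _pruning_example():
    rng = np.random.RandomState(0)
    dense = rng.rand(8, 8)
    np.fill_diagonal(dense, 0)
    adj = csr_matrix(dense)
    edgeweights, edgelist, comp_labels = pruning_clustergraph(
        adj, global_pruning_std=0.5, max_outgoing=3, preserve_disconnected=True)
    return edgeweights, edgelist, comp_labels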
def get_sparse_from_igraph(graph, weight_attr=None):
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
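# Minimal sketch of get_sparse_from_igraph(): a small undirected weighted igraph graph is
# converted to a csr adjacency matrix with both (i, j) and (j, i) entries filled. The
# graph and weights are illustrative only.
def _igraph_to_csr_example():
    g = ig.Graph(n=3, edges=[(0, 1), (1, 2)])
    g.es['weight'] = [0.5, 2.0]
    return get_sparse_from_igraph(g, weight_attr='weight')  # 3 x 3 csr_matrix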
class VIA:
def __init__(self, data, true_label=None, anndata=None, dist_std_local=2, jac_std_global='median',
keep_all_local_dist='auto',
too_big_factor=0.4, resolution_parameter=1.0, partition_type="ModularityVP", small_pop=10,
jac_weighted_edges=True, knn=30, n_iter_leiden=5, random_seed=42,
num_threads=-1, distance='l2', time_smallpop=15,
super_cluster_labels=False,
super_node_degree_list=False, super_terminal_cells=False, x_lazy=0.95, alpha_teleport=0.99,
root_user="root_cluster", preserve_disconnected=True, dataset="humanCD34", super_terminal_clusters=[],
do_impute_bool=False, is_coarse=True, csr_full_graph='', csr_array_locally_pruned='', ig_full_graph='',
full_neighbor_array='', full_distance_array='', embedding=None, df_annot=None,
preserve_disconnected_after_pruning=False,
secondary_annotations=None, pseudotime_threshold_TS=30,cluster_graph_pruning_std = 0.15, visual_cluster_graph_pruning = 0.15,neighboring_terminal_states_threshold=2,num_mcmc_simulations=1300):
        # higher dist_std_local means more edges are kept
        # higher jac_std_global means more edges are kept
if keep_all_local_dist == 'auto':
if data.shape[0] > 50000:
keep_all_local_dist = True # skips local pruning to increase speed
else:
keep_all_local_dist = False
if resolution_parameter != 1:
partition_type = "RBVP" # Reichardt and Bornholdt’s Potts model. Note that this is the same as ModularityVertexPartition when setting 𝛾 = 1 and normalising by 2m
self.data = data
self.true_label = true_label
self.anndata = anndata
self.dist_std_local = dist_std_local
self.jac_std_global = jac_std_global ##0.15 is also a recommended value performing empirically similar to 'median'
self.keep_all_local_dist = keep_all_local_dist
self.too_big_factor = too_big_factor ##if a cluster exceeds this share of the entire cell population, then the PARC will be run on the large cluster. at 0.4 it does not come into play
self.resolution_parameter = resolution_parameter
self.partition_type = partition_type
self.small_pop = small_pop # smallest cluster population to be considered a community
self.jac_weighted_edges = jac_weighted_edges
self.knn = knn
self.n_iter_leiden = n_iter_leiden
self.random_seed = random_seed # enable reproducible Leiden clustering
self.num_threads = num_threads # number of threads used in KNN search/construction
self.distance = distance # Euclidean distance 'l2' by default; other options 'ip' and 'cosine'
self.time_smallpop = time_smallpop
self.super_cluster_labels = super_cluster_labels
self.super_node_degree_list = super_node_degree_list
self.super_terminal_cells = super_terminal_cells
self.x_lazy = x_lazy # 1-x = probability of staying in same node
self.alpha_teleport = alpha_teleport # 1-alpha is probability of jumping
self.root_user = root_user
self.preserve_disconnected = preserve_disconnected
self.dataset = dataset
self.super_terminal_clusters = super_terminal_clusters
self.do_impute_bool = do_impute_bool
self.is_coarse = is_coarse
self.csr_full_graph = csr_full_graph
self.ig_full_graph = ig_full_graph
self.csr_array_locally_pruned = csr_array_locally_pruned
self.full_neighbor_array = full_neighbor_array
self.full_distance_array = full_distance_array
self.embedding = embedding
self.df_annot = df_annot
self.preserve_disconnected_after_pruning = preserve_disconnected_after_pruning
self.secondary_annotations = secondary_annotations
self.pseudotime_threshold_TS = pseudotime_threshold_TS
self.cluster_graph_pruning_std = cluster_graph_pruning_std
self.visual_cluster_graph_pruning = visual_cluster_graph_pruning
        self.neighboring_terminal_states_threshold = neighboring_terminal_states_threshold  # number of neighboring terminal states a terminal state may have before it is eliminated as a TS
        self.num_mcmc_simulations = num_mcmc_simulations  # number of MCMC simulations in the second stage of pseudotime computation
def knngraph_visual(self, data_visual, knn_umap=15, downsampled=False):
k_umap = knn_umap
t0 = time.time()
        # neighbors in the array are not listed in any order of proximity
if downsampled == False:
self.knn_struct.set_ef(k_umap + 1)
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=k_umap)
else:
knn_struct_umap = self.make_knn_struct(visual=True, data_visual=data_visual)
knn_struct_umap.set_ef(k_umap + 1)
neighbor_array, distance_array = knn_struct_umap.knn_query(data_visual, k=k_umap)
row_list = []
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
print('ncells and neighs', n_cells, n_neighbors)
dummy = np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()
print('dummy size', dummy.size)
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
row_min = np.min(distance_array, axis=1)
row_sigma = np.std(distance_array, axis=1)
distance_array = (distance_array - row_min[:, np.newaxis]) / row_sigma[:, np.newaxis]
col_list = neighbor_array.flatten().tolist()
distance_array = distance_array.flatten()
distance_array = np.sqrt(distance_array)
distance_array = distance_array * -1
weight_list = np.exp(distance_array)
threshold = np.mean(weight_list) + 2 * np.std(weight_list)
weight_list[weight_list >= threshold] = threshold
weight_list = weight_list.tolist()
print('weight list', len(weight_list))
graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
graph_transpose = graph.T
prod_matrix = graph.multiply(graph_transpose)
graph = graph_transpose + graph - prod_matrix
return graph
def run_umap_hnsw(self, X_input, graph, n_components=2, alpha: float = 1.0, negative_sample_rate: int = 5,
gamma: float = 1.0, spread=1.0, min_dist=0.1, init_pos='spectral', random_state=1, ):
from umap.umap_ import find_ab_params, simplicial_set_embedding
import matplotlib.pyplot as plt
a, b = find_ab_params(spread, min_dist)
print('a,b, spread, dist', a, b, spread, min_dist)
t0 = time.time()
X_umap = simplicial_set_embedding(data=X_input, graph=graph, n_components=n_components, initial_alpha=alpha,
a=a, b=b, n_epochs=0, metric_kwds={}, gamma=gamma,
negative_sample_rate=negative_sample_rate, init=init_pos,
random_state=np.random.RandomState(random_state), metric='euclidean',
verbose=1, densmap = False, output_dens=False, densmap_kwds={})
return X_umap
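    # Hedged usage sketch (assuming `v` is a fitted VIA instance; names illustrative):
    #     graph = v.knngraph_visual(v.data, knn_umap=15)
    #     X_umap = v.run_umap_hnsw(v.data, graph, n_components=2)
    # Depending on the installed umap-learn version, simplicial_set_embedding may return
    # the coordinates alone or a (coordinates, aux) tuple, so unpack accordingly.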
def get_terminal_clusters(self, A, markov_pt, root_ai):
n_ = A.shape[0] #number of states in the graph component
if n_ <= 10: n_outlier_std = 3
if (n_ <= 40) & (n_ > 10): n_outlier_std = 2
if n_ >= 40: n_outlier_std = 2 # 1
pop_list = []
# print('get terminal', set(self.labels), np.where(self.labels == 0))
for i in list(set(self.labels)):
pop_list.append(len(np.where(self.labels == i)[0]))
# we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
A_new = A.copy()
for i in range(A.shape[0]):
for j in range(A.shape[0]):
A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
# make an igraph graph to compute the closeness
g_dis = ig.Graph.Adjacency(
            (A_new > 0).tolist())  # need to manually add the weights as igraph treats A > 0 as boolean
        g_dis.es['weights'] = 1 / A_new[A_new.nonzero()]  # we want "distances", not weights, for closeness and betweenness
betweenness_score = g_dis.betweenness(weights='weights')
betweenness_score_array = np.asarray(betweenness_score)
betweenness_score_takeout_outlier = betweenness_score_array[betweenness_score_array < (
np.mean(betweenness_score_array) + n_outlier_std * np.std(betweenness_score_array))]
betweenness_list = [i for i, score in enumerate(betweenness_score) if score < (
np.mean(betweenness_score_takeout_outlier) - 0 * np.std(betweenness_score_takeout_outlier))]
closeness_score = g_dis.closeness(mode='ALL', cutoff=None, weights='weights', normalized=True)
closeness_score_array = np.asarray(closeness_score)
closeness_score_takeout_outlier = closeness_score_array[
closeness_score_array < (np.mean(closeness_score_array) + n_outlier_std * np.std(closeness_score_array))]
closeness_list = [i for i, score in enumerate(closeness_score) if
score < (np.mean(closeness_score_takeout_outlier) - 0 * np.std(
closeness_score_takeout_outlier))]
out_deg = A_new.sum(axis=1)
out_deg = np.asarray(out_deg)
outdegree_score_takeout_outlier = out_deg[out_deg < (np.mean(out_deg) + n_outlier_std * np.std(out_deg))]
loc_deg = [i for i, score in enumerate(out_deg) if
score < (np.mean(outdegree_score_takeout_outlier) - 0 * np.std(outdegree_score_takeout_outlier))]
print('closeness shortlist', closeness_list)
print('betweeness shortlist', betweenness_list)
print('out degree shortlist', loc_deg)
markov_pt = np.asarray(markov_pt)
if n_ <= 10:
loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 50))[0]
if (n_ <= 40) & (n_ > 10):
loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 10))[0]
if n_ > 40:
loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 30))[0]
terminal_clusters_1 = list(set(closeness_list) & set(betweenness_list))
terminal_clusters_2 = list(set(closeness_list) & set(loc_deg))
terminal_clusters_3 = list(set(betweenness_list) & set(loc_deg))
terminal_clusters = list(set(terminal_clusters_1) | set(terminal_clusters_2))
terminal_clusters = list(set(terminal_clusters) | set(terminal_clusters_3))
terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
terminal_org = terminal_clusters.copy()
#print('original terminal clusters', terminal_org)
for terminal_i in terminal_org:
if terminal_i in terminal_clusters:
removed_terminal_i = False
else:
removed_terminal_i = True
# print('terminal state', terminal_i)
count_nn = 0
ts_neigh = []
neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
if neigh_terminal.size > 0:
for item in neigh_terminal:
if item in terminal_clusters:
ts_neigh.append(item)
count_nn = count_nn + 1
if n_ >= 10:
                        if item == root_ai:  # if the terminal state is a neighbor of the root state, remove it from the terminal shortlist
if terminal_i in terminal_clusters:
terminal_clusters.remove(terminal_i)
print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
removed_terminal_i = True
if count_nn >=self.neighboring_terminal_states_threshold: #2
if removed_terminal_i == False:
temp_remove = terminal_i
temp_time = markov_pt[terminal_i]
for to_remove_i in ts_neigh:
if markov_pt[to_remove_i] < temp_time:
temp_remove = to_remove_i
temp_time = markov_pt[to_remove_i]
terminal_clusters.remove(temp_remove)
print('TS', terminal_i, 'had', self.neighboring_terminal_states_threshold,'or more neighboring terminal states, namely', ts_neigh, ' and so we removed,', temp_remove)
#print('terminal_clusters', terminal_clusters)
return terminal_clusters
def compute_hitting_time(self, sparse_graph, root, x_lazy, alpha_teleport, number_eig=0):
        # 1 - alpha is the probability of teleporting
        # 1 - x_lazy is the probability of staying in the current state (being lazy)
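        # Sketch of the computation below: from the eigenpairs (lambda_i, v_i) of the
        # symmetric normalized Laplacian, a beta-normalized Green's function is
        # accumulated as G = sum_i v_i v_i^T / (beta_teleport + 2 * lambda_i * x_lazy * (1 - beta_teleport)).
        # Hitting times from the root are then read off beta_teleport * D^(-1/2) G D^(-1/2)
        # by subtracting each row from its diagonal entries (see the hitting_matrix loop below).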
beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
N = sparse_graph.shape[0]
print('start computing lazy-teleporting Expected Hitting Times')
A = scipy.sparse.csr_matrix.todense(sparse_graph) # A is the adjacency matrix
#print('is undirected graph symmetric', (A.transpose() == A).all())
lap = csgraph.laplacian(sparse_graph,
normed=False) # compute regular laplacian (normed = False) to infer the degree matrix where D = L+A
# see example and definition in the SciPy ref https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.laplacian.html
A = scipy.sparse.csr_matrix.todense(lap)
#print('is laplacian symmetric', (A.transpose() == A).all())
deg = sparse_graph + lap # Recall that L=D-A (modified for weighted where D_ii is sum of edge weights and A_ij is the weight of particular edge)
deg.data = 1 / np.sqrt(deg.data) ##inv sqrt of degree matrix
deg[deg == np.inf] = 0
norm_lap = csgraph.laplacian(sparse_graph, normed=True) # returns symmetric normalized D^-.5 xL x D^-.5
Id = np.zeros((N, N), float)
np.fill_diagonal(Id, 1)
norm_lap = scipy.sparse.csr_matrix.todense(norm_lap)
eig_val, eig_vec = np.linalg.eig(
            norm_lap)  # eig_vec[:, i] is the eigenvector for eigenvalue eig_val[i]; np.linalg.eig is used (not eigh, which is only for symmetric matrices) and the eigenvectors are not returned in descending order
if number_eig == 0: number_eig = eig_vec.shape[1]
Greens_matrix = np.zeros((N, N), float)
beta_norm_lap = np.zeros((N, N), float)
Xu = np.zeros((N, N))
Xu[:, root] = 1
Id_Xv = np.zeros((N, N), int)
np.fill_diagonal(Id_Xv, 1)
Xv_Xu = Id_Xv - Xu
start_ = 0
if alpha_teleport == 1:
            start_ = 1  # if there are no jumps (alpha_teleport == 1), then the first term in the beta-normalized Green's function would have 0 in the denominator (first eigenvalue == 0)
for i in range(start_, number_eig): # 0 instead of 1th eg
vec_i = eig_vec[:, i]
factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
vec_i = np.reshape(vec_i, (-1, 1))
eigen_vec_mult = vec_i.dot(vec_i.T)
Greens_matrix = Greens_matrix + (
eigen_vec_mult / factor) # Greens function is the inverse of the beta-normalized laplacian
beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor) # beta-normalized laplacian
deg = scipy.sparse.csr_matrix.todense(deg)
temp = Greens_matrix.dot(deg)
temp = deg.dot(temp) * beta_teleport
hitting_matrix = np.zeros((N, N), float)
diag_row = np.diagonal(temp)
for i in range(N):
hitting_matrix[i, :] = diag_row - temp[i, :]
roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
temp = Xv_Xu.dot(temp)
final_hitting_times = np.diagonal(
temp) ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
roundtrip_times = roundtrip_commute_matrix[root, :]
return abs(final_hitting_times), roundtrip_times
def simulate_branch_probability(self, terminal_state, all_terminal_states, A, root, pt, num_sim=500):
n_states = A.shape[0]
ncpu = multiprocessing.cpu_count()
if (ncpu == 1) | (ncpu == 2):
n_jobs = 1
elif ncpu > 2:
n_jobs = min(ncpu - 1, 5)
# print('njobs', n_jobs)
num_sim_pp = int(num_sim / n_jobs) # num of simulations per process
jobs = []
manager = multiprocessing.Manager()
q = manager.list()
seed_list = list(range(n_jobs))
for i in range(n_jobs):
cumstateChangeHist = np.zeros((1, n_states))
cumstateChangeHist_all = np.zeros((1, n_states))
process = multiprocessing.Process(target=prob_reaching_terminal_state1, args=(
terminal_state, all_terminal_states, A, root, pt, num_sim_pp, q, cumstateChangeHist,
cumstateChangeHist_all,
seed_list[i]))
jobs.append(process)
for j in jobs:
j.start()
for j in jobs:
j.join()
cumhistory_vec = q[0][0]
cumhistory_vec_all = q[0][1]
count_reached = cumhistory_vec_all[0, terminal_state]
for i in range(1, len(q)):
cumhistory_vec = cumhistory_vec + q[i][0]
cumhistory_vec_all = cumhistory_vec_all + q[i][1]
count_reached = count_reached + q[i][1][0, terminal_state]
print('From root', root, ' to Terminal state', terminal_state, 'is found', int(count_reached), ' times.')
cumhistory_vec_all[cumhistory_vec_all == 0] = 1
prob_ = cumhistory_vec / cumhistory_vec_all
np.set_printoptions(precision=3)
if count_reached == 0:
prob_[:, terminal_state] = 0
print('never reached state', terminal_state)
else:
loc_1 = np.where(prob_ == 1)
loc_1 = loc_1[1]
prob_[0, loc_1] = 0
# print('zerod out prob', prob_)
temp_ = np.max(prob_)
if temp_ == 0: temp_ = 1
prob_ = prob_ / min(1, 1.1 * temp_)
prob_[0, loc_1] = 1
return list(prob_)[0]
def simulate_markov(self, A, root):
n_states = A.shape[0]
P = A / A.sum(axis=1).reshape((n_states, 1))
# print('row normed P',P.shape, P, P.sum(axis=1))
x_lazy = self.x_lazy # 1-x is prob lazy
alpha_teleport = self.alpha_teleport
# bias_P is the transition probability matrix
P = x_lazy * P + (1 - x_lazy) * np.identity(n_states)
# print(P, P.sum(axis=1))
P = alpha_teleport * P + ((1 - alpha_teleport) * (1 / n_states) * (np.ones((n_states, n_states))))
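# combined: P_final = alpha_teleport * (x_lazy * P0 + (1 - x_lazy) * I) + (1 - alpha_teleport) / n_states * J,
# where P0 is the row-normalized adjacency and J the all-ones matrix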
# print('check prob of each row sum to one', P.sum(axis=1))
currentState = root
state = np.zeros((1, n_states))
state[0, currentState] = 1
num_sim = self.num_mcmc_simulations #1300 # 1000
ncpu = multiprocessing.cpu_count()
if (ncpu == 1) | (ncpu == 2):
n_jobs = 1
elif ncpu > 2:
n_jobs = min(ncpu - 1, 5)
#print('njobs', n_jobs)
num_sim_pp = int(num_sim / n_jobs) # num of simulations per process
n_steps = int(2 * n_states)
jobs = []
manager = multiprocessing.Manager()
q = manager.list()
for i in range(n_jobs):
hitting_array = np.ones((P.shape[0], 1)) * 1000
process = multiprocessing.Process(target=simulate_markov_sub,
args=(P, num_sim_pp, hitting_array, q, root))
jobs.append(process)
for j in jobs:
j.start()
for j in jobs:
j.join()
print('ended all worker processes, will retrieve and reshape results')
hitting_array = q[0]
for qi in q[1:]:
hitting_array = np.append(hitting_array, qi, axis=1) # .get(), axis=1)
# print('finished getting from queue', hitting_array.shape)
hitting_array_final = np.zeros((1, n_states))
no_times_state_reached_array = np.zeros((1, n_states))
for i in range(n_states):
rowtemp = hitting_array[i, :]
no_times_state_reached_array[0, i] = np.sum(rowtemp != (n_steps + 1))
for i in range(n_states):
rowtemp = hitting_array[i, :]
no_times_state_reached = np.sum(rowtemp != (n_steps + 1))
if no_times_state_reached != 0:
perc = np.percentile(rowtemp[rowtemp != n_steps + 1], 15) + 0.001 # 15 for Human and Toy
# print('state ', i,' has perc' ,perc)
hitting_array_final[0, i] = np.mean(rowtemp[rowtemp <= perc])
else:
hitting_array_final[0, i] = (n_steps + 1)
return hitting_array_final[0]
def compute_hitting_time_onbias(self, laplacian, inv_sqr_deg, root, x_lazy, alpha_teleport, number_eig=0):
# 1 - alpha_teleport is the probability of teleporting
# 1 - x_lazy is the probability of staying in the current state (being lazy)
beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
N = laplacian.shape[0]
print('is laplacian of biased graph symmetric', (laplacian.transpose() == laplacian).all())
Id = np.zeros((N, N), float)
np.fill_diagonal(Id, 1)
# norm_lap = scipy.sparse.csr_matrix.todense(laplacian)
eig_val, eig_vec = np.linalg.eig(
laplacian)  # eig_vec[:, i] is the eigenvector for eigenvalue eig_val[i]; np.linalg.eig is used rather than eigh since the matrix may not be exactly symmetric, and the eigenvectors are not returned in descending order
print('eig val', eig_val.shape)
if number_eig == 0: number_eig = eig_vec.shape[1]
print('number of eig vec', number_eig)
Greens_matrix = np.zeros((N, N), float)
beta_norm_lap = np.zeros((N, N), float)
Xu = np.zeros((N, N))
Xu[:, root] = 1
Id_Xv = np.zeros((N, N), int)
np.fill_diagonal(Id_Xv, 1)
Xv_Xu = Id_Xv - Xu
start_ = 0
if alpha_teleport == 1:
start_ = 1  # if there are no jumps (alpha_teleport == 1), the first eigenvalue is 0 and would put a zero in the denominator of the first term of the beta-normalized Green's function
for i in range(start_, number_eig): # 0 instead of 1st eg
# print(i, 'th eigenvalue is', eig_val[i])
vec_i = eig_vec[:, i]
factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
vec_i = np.reshape(vec_i, (-1, 1))
eigen_vec_mult = vec_i.dot(vec_i.T)
Greens_matrix = Greens_matrix + (
eigen_vec_mult / factor) # Greens function is the inverse of the beta-normalized laplacian
beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor) # beta-normalized laplacian
temp = Greens_matrix.dot(inv_sqr_deg)
temp = inv_sqr_deg.dot(temp) * beta_teleport
hitting_matrix = np.zeros((N, N), float)
diag_row = np.diagonal(temp)
for i in range(N):
hitting_matrix[i, :] = diag_row - temp[i, :]
roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
temp = Xv_Xu.dot(temp)
final_hitting_times = np.diagonal(
temp) ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
roundtrip_times = roundtrip_commute_matrix[root, :]
return abs(final_hitting_times), roundtrip_times
def project_branch_probability_sc(self, bp_array_clus, pt):
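# Project cluster-level branch probabilities and pseudotime onto single cells: each cell gets a sparse weight
# vector over clusters given by the fraction of its knn_sc nearest neighbours belonging to each cluster, and the
# cluster-level values are propagated to cells by a sparse matrix product with this weight matrix.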
print('start single cell projections of pseudotime and lineage likelihood')
n_clus = len(list(set(self.labels)))
labels = np.asarray(self.labels)
n_cells = self.data.shape[0]
if self.data.shape[0] > 1000:
knn_sc = 3
else:
knn_sc = 10
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
#print('shape of neighbor in project onto sc', neighbor_array.shape)
#print('start initalize coo', time.ctime())
# weight_array = coo_matrix((n_cells, n_clus)).tocsr() # csr_matrix
row_list = []
col_list = []
weight_list = []
irow = 0
#print('filling in weight array', time.ctime())
for row in neighbor_array:
neighboring_clus = labels[row]
for clus_i in set(list(neighboring_clus)):
num_clus_i = np.sum(neighboring_clus == clus_i)
wi = num_clus_i / knn_sc
weight_list.append(wi)
row_list.append(irow)
col_list.append(clus_i)
# weight_array[irow, clus_i] = wi
irow = irow + 1
#print('start dot product', time.ctime())
weight_array = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_clus))
bp_array_sc = weight_array.dot(bp_array_clus)
bp_array_sc = bp_array_sc * 1. / np.max(bp_array_sc, axis=0) # divide cell by max value in that column
# print('column max:',np.max(bp_array_sc, axis=0))
# print('sc bp array max', np.max(bp_array_sc))
# bp_array_sc = bp_array_sc/np.max(bp_array_sc)
for i, label_ts in enumerate(list(self.terminal_clusters)):
loc_i = np.where(np.asarray(self.labels) == label_ts)[0]
loc_noti = np.where(np.asarray(self.labels) != label_ts)[0]
if np.max(bp_array_sc[loc_noti, i]) > 0.8: bp_array_sc[loc_i, i] = 1.2
pt = np.asarray(pt)
pt = np.reshape(pt, (n_clus, 1))
pt_sc = weight_array.dot(pt)
pt_sc=pt_sc/np.amax(pt_sc)
self.single_cell_bp = bp_array_sc
self.single_cell_pt_markov = pt_sc.flatten()
df_via_pt = pd.DataFrame(self.single_cell_pt_markov)
return
def project_branch_probability_sc_old(self, bp_array_clus, pt):
labels = np.asarray(self.labels)
n_cells = self.data.shape[0]
if n_cells > 1000:
knn_sc = 3
else:
knn_sc = 10
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
print('shape of neighbor in project onto single cell', neighbor_array.shape)
n_clus = len(list(set(labels)))
print('initialize csr')
weight_array = csr_matrix((n_cells, n_clus)) # np.zeros((len(labels), n_clus))
print('filling in weight array')
for irow, row in enumerate(neighbor_array):
neighboring_clus = labels[row]
for clus_i in set(list(neighboring_clus)):
num_clus_i = np.sum(neighboring_clus == clus_i)
wi = num_clus_i / knn_sc
weight_array[irow, clus_i] = wi
print('convert weight array in sc_project_bp to csr_matrix')
bp_array_sc = weight_array.dot(bp_array_clus)
bp_array_sc = bp_array_sc * 1. / np.max(bp_array_sc, axis=0) # divide cell by max value in that column
for i, label_ts in enumerate(list(self.terminal_clusters)):
loc_i = np.where(np.asarray(self.labels) == label_ts)[0]
loc_noti = np.where(np.asarray(self.labels) != label_ts)[0]
if np.max(bp_array_sc[loc_noti, i]) > 0.8: bp_array_sc[loc_i, i] = 1.2
pt = np.asarray(pt)
pt = np.reshape(pt, (n_clus, 1))
pt_sc = weight_array.dot(pt)
self.single_cell_bp = bp_array_sc
self.single_cell_pt_markov = pt_sc.flatten()
return
def make_knn_struct(self, too_big=False, big_cluster=None, visual=False, data_visual=None):
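# Build an hnswlib approximate nearest-neighbour index on self.data (or on a supplied subset / visual embedding).
# ef_construction and M are chosen heuristically from the dataset size and dimensionality, and ef at query time
# must always exceed k.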
if visual == False:
data = self.data
else:
data = data_visual
if self.knn > 190: print(colored('please provide a lower K_in for KNN graph construction', 'red'))
ef_query = max(100, self.knn + 1)  # ef should always be > k; a higher ef gives a more accurate query
if too_big == False:
num_dims = data.shape[1]
n_elements = data.shape[0]
p = hnswlib.Index(space=self.distance, dim=num_dims)  # metric set by self.distance (default 'l2', Euclidean)
p.set_num_threads(self.num_threads) # allow user to set threads used in KNN construction
if n_elements < 10000:
ef_param_const = min(n_elements - 10, 500)
ef_query = ef_param_const
else:
ef_param_const = 200
if (num_dims > 30) & (n_elements <= 50000):
p.init_index(max_elements=n_elements, ef_construction=ef_param_const,
M=48) ## good for scRNA seq where dimensionality is high
else:
p.init_index(max_elements=n_elements, ef_construction=ef_param_const, M=30)
p.add_items(data)
if too_big == True:
num_dims = big_cluster.shape[1]
n_elements = big_cluster.shape[0]
p = hnswlib.Index(space='l2', dim=num_dims)
p.init_index(max_elements=n_elements, ef_construction=200, M=30)
p.add_items(big_cluster)
p.set_ef(ef_query) # ef should always be > k
return p
def make_csrmatrix_noselfloop(self, neighbor_array, distance_array, auto_=True):
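# Convert hnswlib's (neighbor, distance) output into a sparse affinity graph. Distances are square-rooted and
# edge weights are inverse distances. With local pruning enabled, edges longer than mean + dist_std_local * std
# within each cell's neighbourhood are dropped and self-loops are skipped; without pruning, all k edges are kept
# and the weights are additionally rescaled by the squared mean distance.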
if auto_ == True:
local_pruning_bool = not (self.keep_all_local_dist)
#if local_pruning_bool == True: print(colored('commencing local pruning based on l2 (squared) at', 'blue'),colored(str(self.dist_std_local) + 's.dev above mean', 'green'))
if auto_ == False: local_pruning_bool = False
row_list = []
col_list = []
weight_list = []
# neighbors are not listed in any order of proximity
num_neigh = neighbor_array.shape[1]
distance_array = np.sqrt(distance_array)
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
rowi = 0
count_0dist = 0
discard_count = 0
if local_pruning_bool == True: # do some local pruning based on distance
for row in neighbor_array:
distlist = distance_array[rowi, :]
to_keep = np.where(distlist <= np.mean(distlist) + self.dist_std_local * np.std(distlist))[0] # 0*std
updated_nn_ind = row[np.ix_(to_keep)]
updated_nn_weights = distlist[np.ix_(to_keep)]
discard_count = discard_count + (num_neigh - len(to_keep))
for ik in range(len(updated_nn_ind)):
if rowi != row[ik]: # remove self-loops
row_list.append(rowi)
col_list.append(updated_nn_ind[ik])
dist = updated_nn_weights[ik]
if dist == 0:
count_0dist = count_0dist + 1
weight_list.append(dist)
rowi = rowi + 1
weight_list = np.asarray(weight_list)
weight_list = 1. / (weight_list + 0.01)
if local_pruning_bool == False: # dont prune based on distance
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (1. / (distance_array.flatten() + 0.01))
weight_list = weight_list * (np.mean(distance_array) ** 2)
weight_list = weight_list.tolist()
csr_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
return csr_graph
def func_mode(self, ll):
# return MODE of list
# If multiple items are maximal, the function returns the first one encountered.
return max(set(ll), key=ll.count)
def run_toobig_subPARC(self, X_data, jac_std_toobig=1,
jac_weighted_edges=True):
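# Re-cluster a single over-sized cluster: build its own KNN graph, prune outlier edges and weak Jaccard edges,
# run Leiden on the pruned similarity graph, then re-assign any very small resulting clusters to the majority
# label among each cell's original neighbours.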
n_elements = X_data.shape[0]
hnsw = self.make_knn_struct(too_big=True, big_cluster=X_data)
if self.knn >= 0.8 * n_elements:
k = int(0.5 * n_elements)
else:
k = self.knn
neighbor_array, distance_array = hnsw.knn_query(X_data, k=k)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
sources, targets = csr_array.nonzero()
mask = np.zeros(len(sources), dtype=bool)
mask |= (csr_array.data > (
np.mean(csr_array.data) + np.std(csr_array.data) * 5)) # smaller distance means stronger edge
csr_array.data[mask] = 0
csr_array.eliminate_zeros()
sources, targets = csr_array.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
edgelist_copy = edgelist.copy()
G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
sim_list = G.similarity_jaccard(pairs=edgelist_copy) # list of jaccard weights
new_edgelist = []
sim_list_array = np.asarray(sim_list)
if jac_std_toobig == 'median':
threshold = np.median(sim_list)
else:
threshold = np.mean(sim_list) - jac_std_toobig * np.std(sim_list)
strong_locs = np.where(sim_list_array > threshold)[0]
for ii in strong_locs: new_edgelist.append(edgelist_copy[ii])
sim_list_new = list(sim_list_array[strong_locs])
if jac_weighted_edges == True:
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
else:
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist))
G_sim.simplify(combine_edges='sum')
if jac_weighted_edges == True:
if self.partition_type == 'ModularityVP':
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed)
#print('partition type MVP')
else:
partition = leidenalg.find_partition(G_sim, leidenalg.RBConfigurationVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed,
resolution_parameter=self.resolution_parameter)
else:
if self.partition_type == 'ModularityVP':
#print('partition type MVP')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed)
else:
print('partition type RBC')
partition = leidenalg.find_partition(G_sim, leidenalg.RBConfigurationVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed,
resolution_parameter=self.resolution_parameter)
PARC_labels_leiden = np.asarray(partition.membership)
PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
small_pop_list = []
small_cluster_list = []
small_pop_exist = False
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
for cluster in set(PARC_labels_leiden):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < 5:
small_pop_exist = True
small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
small_cluster_list.append(cluster)
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
if len(available_neighbours) > 0:
available_neighbours_list = [value for value in group_of_old_neighbors if
value in list(available_neighbours)]
best_group = max(available_neighbours_list, key=available_neighbours_list.count)
PARC_labels_leiden[single_cell] = best_group
do_while_time = time.time()
while (small_pop_exist == True) & (time.time() - do_while_time < 5):
small_pop_list = []
small_pop_exist = False
for cluster in set(list(PARC_labels_leiden.flatten())):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < 10:
small_pop_exist = True
small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
PARC_labels_leiden[single_cell] = best_group
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
self.labels = PARC_labels_leiden
return PARC_labels_leiden
def recompute_weights(self, clustergraph_ig, pop_list_raw):
sparse_clustergraph = get_sparse_from_igraph(clustergraph_ig, weight_attr='weight')
n = sparse_clustergraph.shape[0]
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
weights = sparse_clustergraph.data
new_weights = []
i = 0
for s, t in edgelist:
pop_s = pop_list_raw[s]
pop_t = pop_list_raw[t]
w = weights[i]
nw = w * (pop_s + pop_t) / (pop_s * pop_t)  # scale by (1/pop_s + 1/pop_t) so edges between large clusters do not dominate simply by accumulating many single-cell links
new_weights.append(nw)
i = i + 1
scale_factor = max(new_weights) - min(new_weights)
wmin = min(new_weights)
new_weights = [(wi + wmin) / scale_factor for wi in new_weights]
sparse_clustergraph = csr_matrix((np.array(new_weights), (sources, targets)), shape=(n, n))
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
return sparse_clustergraph, edgelist
def find_root_iPSC(self, graph_dense, PARC_labels_leiden, root_user, true_labels, super_cluster_labels_sub,
super_node_degree_list):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
min_deg = 1000
super_min_deg = 1000
found_super_and_sub_root = False
found_any_root = False
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
if self.super_cluster_labels != False:
super_majority_cluster = self.func_mode(list(np.asarray(super_cluster_labels_sub)[cluster_i_loc]))
super_majority_cluster_loc = np.where(np.asarray(super_cluster_labels_sub) == super_majority_cluster)[0]
super_majority_truth = self.func_mode(list(true_labels[super_majority_cluster_loc]))
super_node_degree = super_node_degree_list[super_majority_cluster]
if (str(root_user) == majority_truth) & (str(root_user) == str(super_majority_truth)):
if super_node_degree < super_min_deg:
found_super_and_sub_root = True
root = cluster_i
found_any_root = True
min_deg = deg_list[ci]
super_min_deg = super_node_degree
print('new root is', root)
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
if (self.super_cluster_labels == False) | (found_super_and_sub_root == False):
for ic, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
true_labels = np.asarray(true_labels)
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
if (str(root_user) == str(majority_truth)):
if deg_list[ic] < min_deg:
root = cluster_i
found_any_root = True
min_deg = deg_list[ic]
print('new root is', root, ' with degree %.2f' % min_deg, majority_truth)
if found_any_root == False:
print('setting arbitrary root', cluster_i)
root = cluster_i
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_HumanCD34(self, graph_dense, PARC_labels_leiden, root_idx, true_labels):
# single cell index given corresponding to user defined root cell
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
root = PARC_labels_leiden[root_idx]
graph_node_label.append(str(majority_truth)[0:5] + 'c' + str(cluster_i))
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_bcell(self, graph_dense, PARC_labels_leiden, root_user, true_labels):
# root_user is the single-cell index given by the user when running VIA
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_user]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_2Morgan(self, graph_dense, PARC_labels_leiden, root_idx, true_labels):
# single cell index given corresponding to user defined root cell
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
secondary_annotations = np.asarray(self.secondary_annotations)
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_secondary = str(self.func_mode(list(secondary_annotations[cluster_i_loc])))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
# graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_idx]
graph_node_label.append(str(majority_truth)[0:5] + 'c' + str(cluster_i) + str(majority_truth_secondary))
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_toy(self, graph_dense, PARC_labels_leiden, root_user, true_labels, super_cluster_labels_sub,
super_node_degree_list):
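# Choose the root cluster: prefer the cluster whose majority true label matches root_user and, when
# super-cluster labels are available, whose corresponding super-cluster has the lowest node degree (an
# end-point of the graph); otherwise fall back to the minimum-degree cluster matching root_user, and to an
# arbitrary cluster if no match is found.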
# PARC_labels_leiden is the subset belonging to the component of the graph being considered. graph_dense is a component of the full graph
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
min_deg = 1000
super_min_deg = 1000
found_super_and_sub_root = False
found_any_root = False
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
if self.super_cluster_labels != False:
super_majority_cluster = self.func_mode(list(np.asarray(super_cluster_labels_sub)[cluster_i_loc]))
super_majority_cluster_loc = np.where(np.asarray(super_cluster_labels_sub) == super_majority_cluster)[0]
super_majority_truth = self.func_mode(list(true_labels[super_majority_cluster_loc]))
super_node_degree = super_node_degree_list[super_majority_cluster]
if (str(root_user) in majority_truth) & (str(root_user) in str(super_majority_truth)):
if super_node_degree < super_min_deg:
found_super_and_sub_root = True
root = cluster_i
found_any_root = True
min_deg = deg_list[ci]
super_min_deg = super_node_degree
print('new root is', root, ' with degree %.2f' % min_deg, 'and super node degree %.2f' % super_min_deg)
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
if (self.super_cluster_labels == False) | (found_super_and_sub_root == False):
#print('self.super_cluster_labels', super_cluster_labels_sub, ' foundsuper_cluster_sub and super root',found_super_and_sub_root)
for ic, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
# print('cluster', cluster_i, 'set true labels', set(true_labels))
true_labels = np.asarray(true_labels)
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
# print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if (str(root_user) in str(majority_truth)):
if deg_list[ic] < min_deg:
root = cluster_i
found_any_root = True
min_deg = deg_list[ic]
print('new root is', root, ' with degree %.2f' % min_deg, majority_truth)
# print('len graph node label', graph_node_label)
if found_any_root == False:
print('setting arbitrary root', cluster_i)
root = cluster_i
return graph_node_label, majority_truth_labels, deg_list, root
def full_graph_paths(self, X_data, n_components_original=1):
# make igraph object of very low-K KNN using the knn_struct PCA-dimension space made in PARC.
# This is later used by find_shortest_path for sc_bp visual
# the neighbor array is not listed in any order of proximity
print('number of components in the original full graph', n_components_original)
print('for downstream visualization purposes we are also constructing a low knn-graph ')
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=3)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array, auto_=False)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
k_0 = 2 # 3
if n_components_original == 1:
while (n_comp > 1):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array,
auto_=False) # do not automatically use the local-pruning of Via
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
if n_components_original > 1:
while (k_0 <= 5) & (n_comp > n_components_original):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array,
auto_=False)  # do not automatically use the local pruning of Via
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
row_list = []
print('size neighbor array in low-KNN in pca-space for visualization', neighbor_array.shape)
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (distance_array.flatten()).tolist()
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
Gr = ig.Graph(edgelist) # , edge_attrs={'weight': csr_full_graph.data.tolist()})
Gr.simplify(combine_edges='sum')
return Gr
def get_gene_expression(self, gene_exp, title_gene=""):
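# For each terminal state that is actually reached, fit a GAM of gene_exp against (normalized) single-cell
# pseudotime on the cells belonging to that lineage, weighting cells by their branch probability, and plot the
# smoothed trend.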
fig_0, ax = plt.subplots()
sc_pt = self.single_cell_pt_markov
sc_bp_original = self.single_cell_bp
n_terminal_states = sc_bp_original.shape[1]
jet = cm.get_cmap('jet', n_terminal_states)
cmap_ = jet(range(n_terminal_states))
for i in range(n_terminal_states): # [0]:
sc_bp = sc_bp_original.copy()
if len(np.where(sc_bp[:, i] > 0.8)[
0]) > 0: # check in case this terminal state i cannot be reached (sc_bp is all 0)
# loc_terminal_i = np.where(np.asarray(self.labels) == self.terminal_clusters[i])[0]
loc_i = np.where(sc_bp[:, i] > 0.95)[0] # 0.8
val_pt = [sc_pt[pt_i] for pt_i in loc_i] # TODO, replace with array to speed up
max_val_pt = max(val_pt)
loc_i_bp = np.where(sc_bp[:, i] > 0.000)[0] # 0.000
loc_i_sc = np.where(np.asarray(sc_pt) <= max_val_pt)[0]
loc_ = np.intersect1d(loc_i_bp, loc_i_sc)
gam_in = np.asarray(sc_pt)[loc_]
gam_in = gam_in/max(gam_in)
x = gam_in.reshape(-1, 1)
y = np.asarray(gene_exp)[loc_].reshape(-1, 1)
weights = np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
if len(loc_) > 1:
geneGAM = pg.LinearGAM(n_splines=10, spline_order=4, lam=10).fit(x, y, weights=weights)
nx_spacing = 100
#xval = np.linspace(min(sc_pt), max_val_pt, nx_spacing * 2)
xval = np.linspace(min(sc_pt), 1, nx_spacing * 2)
yg = geneGAM.predict(X=xval)
else:
    print('terminal state', i, ': fewer than two cells available, skipping the GAM fit for this lineage')
    continue  # xval and yg would be undefined here, so skip plotting this lineage
ax.plot(xval, yg, color=cmap_[i], linewidth=3.5, zorder=3, label='TS:' + str(
self.terminal_clusters[i])) # linestyle=(0, (5, 2, 1, 2)), dash_capstyle='round'
plt.legend()
plt.title('Trend:' + title_gene)
return
def get_gene_expression_multi(self, ax, gene_exp, title_gene=""):
sc_pt = self.single_cell_pt_markov
sc_bp_original = self.single_cell_bp
n_terminal_states = sc_bp_original.shape[1]
jet = cm.get_cmap('jet', n_terminal_states)
cmap_ = jet(range(n_terminal_states))
for i in [0]:#range(n_terminal_states):
sc_bp = sc_bp_original.copy()
if len(np.where(sc_bp[:, i] > 0.9)[
0]) > 0: # check in case this terminal state i cannot be reached (sc_bp is all 0)
loc_i = np.where(sc_bp[:, i] > 0.9)[0]
val_pt = [sc_pt[pt_i] for pt_i in loc_i] # TODO, replace with array to speed up
max_val_pt = max(val_pt)
loc_i_bp = np.where(sc_bp[:, i] > 0.000)[0] # 0.001
loc_i_sc = np.where(np.asarray(sc_pt) <= max_val_pt)[0]
loc_ = np.intersect1d(loc_i_bp, loc_i_sc)
gam_in = np.asarray(sc_pt)[loc_]
x = gam_in.reshape(-1, 1)
y = np.asarray(gene_exp)[loc_].reshape(-1, 1)
weights = np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
if len(loc_) > 1:
# geneGAM = pg.LinearGAM(n_splines=20, spline_order=5, lam=10).fit(x, y, weights=weights)
# geneGAM = pg.LinearGAM(n_splines=10, spline_order=4, lam=10).fit(x, y, weights=weights)
geneGAM = pg.LinearGAM(n_splines=10, spline_order=4, lam=10).fit(x, y, weights=weights)
nx_spacing = 100
xval = np.linspace(min(sc_pt), max_val_pt, nx_spacing * 2)
yg = geneGAM.predict(X=xval)
else:
    print('terminal state', i, ': fewer than two cells available, skipping the GAM fit')
    continue  # xval and yg would be undefined here, so skip plotting
# cmap_[i]
ax.plot(xval, yg, color='navy', linewidth=3.5, zorder=3,
label='TS:' + str(self.terminal_clusters[i]))
#plt.legend()
ax.set_title(title_gene)
return
def do_impute(self, df_gene, magic_steps=3, gene_list=[]):
# df_gene is a cells x genes pandas DataFrame (e.g. extracted from a scanpy AnnData object)
if self.do_impute_bool == False:
print(colored('please re-run Via with do_impute set to True', 'red'))
return
else:
from sklearn.preprocessing import normalize
transition_full_graph = normalize(self.csr_full_graph, norm='l1',
axis=1) ** magic_steps  # l1-normalize each row to obtain the transition matrix, then raise it to the power magic_steps
print('shape of transition matrix raised to power', magic_steps, transition_full_graph.shape)
subset = df_gene[gene_list].values
dot_ = transition_full_graph.dot(subset)
df_imputed_gene = pd.DataFrame(dot_, index=df_gene.index, columns=gene_list)
print('shape of imputed gene matrix', df_imputed_gene.shape)
return df_imputed_gene
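# Hedged usage sketch for do_impute (illustrative only; the variable, DataFrame and gene names below are
# assumptions, not part of this file): assuming the enclosing class has been instantiated as `v0` with
# do_impute_bool=True and has been run, one might call
#     df_imputed = v0.do_impute(df_gene, magic_steps=3, gene_list=['gene_A', 'gene_B'])
# where df_gene is a cells x genes pandas DataFrame aligned with the data matrix passed to the class.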
def run_subPARC(self):
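# Main VIA pipeline: (1) build and prune the single-cell KNN graph and run Leiden community detection,
# (2) expand clusters that are too big and absorb clusters that are too small, (3) build and prune the
# cluster graph, (4) for each connected component: find the root cluster, compute hitting-time and MCMC
# pseudotime, bias the edges, identify terminal clusters and simulate lineage probabilities, and
# (5) project pseudotime and branch probabilities back onto single cells.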
root_user = self.root_user
X_data = self.data
too_big_factor = self.too_big_factor
small_pop = self.small_pop
jac_std_global = self.jac_std_global
jac_weighted_edges = self.jac_weighted_edges
n_elements = X_data.shape[0]
if self.is_coarse == True:
# graph for PARC
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=self.knn)
csr_array_locally_pruned = self.make_csrmatrix_noselfloop(neighbor_array,
distance_array) # incorporates local distance pruning
else:
neighbor_array = self.full_neighbor_array
distance_array = self.full_distance_array
csr_array_locally_pruned = self.csr_array_locally_pruned
sources, targets = csr_array_locally_pruned.nonzero()
edgelist = list(zip(sources, targets))
edgelist_copy = edgelist.copy()
G = ig.Graph(n=X_data.shape[0], edges=edgelist,
edge_attrs={'weight': csr_array_locally_pruned.data.tolist()}) # used for PARC
# print('average degree of prejacard graph is %.1f'% (np.mean(G.degree())))
# print('computing Jaccard metric')
sim_list = G.similarity_jaccard(pairs=edgelist_copy)
print('time is', time.ctime())
print('commencing global pruning')
sim_list_array = np.asarray(sim_list)
edge_list_copy_array = np.asarray(edgelist_copy)
if jac_std_global == 'median':
threshold = np.median(sim_list)
else:
threshold = np.mean(sim_list) - jac_std_global * np.std(sim_list)
strong_locs = np.where(sim_list_array > threshold)[0]
print('Share of edges kept after Global Pruning %.2f' % (len(strong_locs) * 100 / len(sim_list)), '%')
new_edgelist = list(edge_list_copy_array[strong_locs])
sim_list_new = list(sim_list_array[strong_locs])
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
G_sim.simplify(combine_edges='sum')
if self.is_coarse == True:
#### construct the full graph (no pruning) to be used for the cluster-graph edges; neighbors are not listed in any order of proximity
row_list = []
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
distance_array = np.sqrt(distance_array)
weight_list = (1. / (distance_array.flatten() + 0.05)) # 0.05
mean_sqrt_dist_array = np.mean(distance_array)
weight_list = weight_list * (mean_sqrt_dist_array ** 2)
# we scale weight_list by the squared mean distance because simply inverting the distances compresses the weights into (0, 1],
# so many genuinely close neighbors would end up with weights near 0, hard to distinguish from non-neighbors (weight = 0)
weight_list = weight_list.tolist()
# print('distance values', np.percentile(distance_array, 5), np.percentile(distance_array, 95), np.mean(distance_array))
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
n_original_comp, n_original_comp_labels = connected_components(csr_full_graph, directed=False)
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
G = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
sim_list = G.similarity_jaccard(pairs=edgelist) # list of jaccard weights
ig_fullgraph = ig.Graph(list(edgelist), edge_attrs={'weight': sim_list})
ig_fullgraph.simplify(combine_edges='sum')
# self.csr_array_pruned = G_sim # this graph is pruned (locally and globally) for use in PARC
self.csr_array_locally_pruned = csr_array_locally_pruned
self.ig_full_graph = ig_fullgraph # for VIA we prune the vertex cluster graph *after* making the clustergraph
self.csr_full_graph = csr_full_graph
self.full_neighbor_array = neighbor_array
self.full_distance_array = distance_array
if self.is_coarse == True:
# knn graph used for making trajectory drawing on the visualization
# print('skipping full_graph_shortpath')
self.full_graph_shortpath = self.full_graph_paths(X_data, n_original_comp)
neighbor_array = self.full_neighbor_array
if self.is_coarse == False:
ig_fullgraph = self.ig_full_graph # for Trajectory
# G_sim = self.csr_array_pruned # for PARC
# neighbor_array = self.full_neighbor_array # needed to assign spurious outliers to clusters
# print('average degree of SIMPLE graph is %.1f' % (np.mean(G_sim.degree())))
print('commencing community detection')
start_leiden = time.time()
if jac_weighted_edges == True:
if self.partition_type == 'ModularityVP':
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed)
#print('partition type MVP')
else:
partition = leidenalg.find_partition(G_sim, leidenalg.RBConfigurationVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed,
resolution_parameter=self.resolution_parameter)
print('partition type RBC')
else:
if self.partition_type == 'ModularityVP':
#print('partition type MVP')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed)
else:
#print('partition type RBC')
partition = leidenalg.find_partition(G_sim, leidenalg.RBConfigurationVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed,
resolution_parameter=self.resolution_parameter)
print(round(time.time() - start_leiden), ' seconds for leiden')
time_end_PARC = time.time()
# print('Q= %.1f' % (partition.quality()))
PARC_labels_leiden = np.asarray(partition.membership)
PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
print('time is', time.ctime())
print(len(set(PARC_labels_leiden.flatten())), ' clusters before handling small/big')
pop_list_1 = []
count_big_pops = 0
num_times_expanded = 0
for item in set(list(PARC_labels_leiden.flatten())):
count_item = list(PARC_labels_leiden.flatten()).count(item)
if count_item > self.too_big_factor * n_elements:
count_big_pops = count_big_pops + 1
pop_list_1.append([item, count_item])
print(colored('There are ' + str(count_big_pops) + ' clusters that are too big', 'blue'))
too_big = False
cluster_i_loc = np.where(PARC_labels_leiden == 0)[
0]  # the 0th cluster is the largest one, so if cluster 0 is not too big, the others won't be too big either
pop_i = len(cluster_i_loc)
if pop_i > too_big_factor * n_elements:
too_big = True
print('too big is', too_big, '. If True, cluster 0 will be expanded')
num_times_expanded = num_times_expanded + 1
cluster_big_loc = cluster_i_loc
list_pop_too_bigs = [pop_i]
time0_big = time.time()
while (too_big == True) & (not ((time.time() - time0_big > 200) & (num_times_expanded >= count_big_pops))):
X_data_big = X_data[cluster_big_loc, :]
PARC_labels_leiden_big = self.run_toobig_subPARC(X_data_big)
num_times_expanded = num_times_expanded + 1
PARC_labels_leiden_big = PARC_labels_leiden_big + 100000
pop_list = []
for item in set(list(PARC_labels_leiden_big.flatten())):
pop_list.append([item, list(PARC_labels_leiden_big.flatten()).count(item)])
jj = 0
for j in cluster_big_loc:
PARC_labels_leiden[j] = PARC_labels_leiden_big[jj]
jj = jj + 1
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
#print(pop_list_1, set(PARC_labels_leiden))
too_big = False
set_PARC_labels_leiden = set(PARC_labels_leiden)
PARC_labels_leiden = np.asarray(PARC_labels_leiden)
for cluster_ii in set_PARC_labels_leiden:
cluster_ii_loc = np.where(PARC_labels_leiden == cluster_ii)[0]
pop_ii = len(cluster_ii_loc)
not_yet_expanded = pop_ii not in list_pop_too_bigs
if (pop_ii > too_big_factor * n_elements) and not_yet_expanded:
too_big = True
# print('cluster', cluster_ii, 'is too big and has population', pop_ii)
cluster_big_loc = cluster_ii_loc
cluster_big = cluster_ii
big_pop = pop_ii
if too_big == True:
list_pop_too_bigs.append(big_pop)
print('cluster', cluster_big, 'is too big with population', big_pop, '. It will be expanded')
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
small_pop_list = []
small_cluster_list = []
small_pop_exist = False
for cluster in set(PARC_labels_leiden):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop: # 10
small_pop_exist = True
small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
small_cluster_list.append(cluster)
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
if len(available_neighbours) > 0:
available_neighbours_list = [value for value in group_of_old_neighbors if
value in list(available_neighbours)]
best_group = max(available_neighbours_list, key=available_neighbours_list.count)
PARC_labels_leiden[single_cell] = best_group
time_smallpop = time.time()
while (small_pop_exist == True) & (time.time() - time_smallpop < 15):
small_pop_list = []
small_pop_exist = False
for cluster in set(list(PARC_labels_leiden.flatten())):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop:
small_pop_exist = True
small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
PARC_labels_leiden[single_cell] = best_group
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
PARC_labels_leiden = list(PARC_labels_leiden.flatten())
pop_list = []
pop_list_raw = []
for item in range(len(set(PARC_labels_leiden))):
pop_item = PARC_labels_leiden.count(item)
pop_list.append((item, pop_item))
pop_list_raw.append(pop_item)
#print('list of cluster labels and populations', len(pop_list), pop_list)
df_temporary_save = pd.DataFrame(PARC_labels_leiden, columns=['parc'])
#df_temporary_save.to_csv("/home/shobi/Trajectory/Datasets/2MOrgan/Figures/Parc_.csv")
self.labels = PARC_labels_leiden # list
n_clus = len(set(self.labels))
## Make cluster-graph
vc_graph = ig.VertexClustering(ig_fullgraph,
membership=PARC_labels_leiden) # jaccard weights, bigger is better
vc_graph = vc_graph.cluster_graph(combine_edges='sum')
reweighted_sparse_vc, edgelist = self.recompute_weights(vc_graph, pop_list_raw)
if self.dataset == 'toy':
global_pruning_std = 1
print('Toy: global cluster graph pruning level', global_pruning_std)
# toy data is usually simpler, so we don't need to prune links heavily; the clusters are well separated and spurious links rarely exist
elif self.dataset == 'bcell':
global_pruning_std = 0.15
print('Bcell: global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'iPSC':
global_pruning_std = 0.15
print('iPSC: global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'EB':
global_pruning_std = 0.15
print('EB: global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'mESC':
global_pruning_std = 0.0
print('mESC: global cluster graph pruning level', global_pruning_std)
elif self.dataset == '2M':
global_pruning_std = 0.15 # 0 for the knn20ncomp30 and knn30ncomp30 run on Aug 12 and Aug11
print('2M: global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'scATAC':
global_pruning_std = 0.15
print('scATAC: global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'droso':
global_pruning_std = 0.15
print('droso: global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'faced':
global_pruning_std = 1
elif self.dataset == 'pancreas':
global_pruning_std = 0
print(self.dataset, ': global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'cardiac':
global_pruning_std = 0.15
print(self.dataset, ': global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'cardiac_merge':
global_pruning_std = .6
print(self.dataset, ': global cluster graph pruning level', global_pruning_std)
else:
global_pruning_std = self.cluster_graph_pruning_std
print(self.dataset, ': global cluster graph pruning level', global_pruning_std)
edgeweights, edgelist, comp_labels = pruning_clustergraph(reweighted_sparse_vc,
global_pruning_std=global_pruning_std,
preserve_disconnected=self.preserve_disconnected,
preserve_disconnected_after_pruning=self.preserve_disconnected_after_pruning)
self.connected_comp_labels = comp_labels
locallytrimmed_g = ig.Graph(edgelist, edge_attrs={'weight': edgeweights.tolist()})
locallytrimmed_g = locallytrimmed_g.simplify(combine_edges='sum')
locallytrimmed_sparse_vc = get_sparse_from_igraph(locallytrimmed_g, weight_attr='weight')
layout = locallytrimmed_g.layout_fruchterman_reingold(weights='weight') ##final layout based on locally trimmed
# layout = locallytrimmed_g.layout_kamada_kawai()
# layout = locallytrimmed_g.layout_graphopt(niter=500, node_charge=0.001, node_mass=5, spring_length=0, spring_constant=1,max_sa_movement=5, seed=None)
# globally trimmed link
sources, targets = locallytrimmed_sparse_vc.nonzero()
edgelist_simple = list(zip(sources.tolist(), targets.tolist()))
edgelist_unique = set(tuple(sorted(l)) for l in edgelist_simple) # keep only one of (0,1) and (1,0)
self.edgelist_unique = edgelist_unique
# print('edge list unique', edgelist_unique)
self.edgelist = edgelist
# print('edgelist', edgelist)
x_lazy = self.x_lazy
alpha_teleport = self.alpha_teleport
# number of components
n_components, labels_cc = connected_components(csgraph=locallytrimmed_sparse_vc, directed=False,
return_labels=True)
print('there are ', n_components, 'components in the graph')
df_graph = pd.DataFrame(locallytrimmed_sparse_vc.todense())
df_graph['cc'] = labels_cc
df_graph['pt'] = float('NaN')
df_graph['majority_truth'] = 'maj truth'
df_graph['graph_node_label'] = 'node label'
set_parc_labels = list(set(PARC_labels_leiden))
set_parc_labels.sort()
tsi_list = []
print('root user', root_user)
df_graph['markov_pt'] = float('NaN')
terminal_clus = []
node_deg_list = []
super_terminal_clus_revised = []
pd_columnnames_terminal = []
dict_terminal_super_sub_pairs = {}
self.root = []
large_components = []
for comp_i in range(n_components):
loc_compi = np.where(labels_cc == comp_i)[0]
if len(loc_compi) > 1:
large_components.append(comp_i)
for comp_i in large_components: # range(n_components):
loc_compi = np.where(labels_cc == comp_i)[0]
a_i = df_graph.iloc[loc_compi][loc_compi].values
a_i = csr_matrix(a_i, (a_i.shape[0], a_i.shape[0]))
cluster_labels_subi = [x for x in loc_compi]
# print('cluster_labels_subi', cluster_labels_subi)
sc_labels_subi = [PARC_labels_leiden[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
sc_truelabels_subi = [self.true_label[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
if ((self.dataset == 'toy') | (self.dataset == 'faced')):
if self.super_cluster_labels != False:
# find which sub-cluster has the super-cluster root
if 'T1_M1' in sc_truelabels_subi:
root_user_ = 'T1_M1'
elif 'T2_M1' in sc_truelabels_subi:
root_user_ = 'T2_M1'
else:
for ri in root_user:
if ri in sc_truelabels_subi: root_user_ = ri
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
# print('super node degree', self.super_node_degree_list)
# print('component', comp_i, 'has root', root_user[comp_i])
# print('super_labels_subi', super_labels_subi)
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_toy(a_i,
sc_labels_subi,
root_user_,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
else:
if 'T1_M1' in sc_truelabels_subi:
root_user_ = 'T1_M1'
elif 'T2_M1' in sc_truelabels_subi:
root_user_ = 'T2_M1'
else:
for ri in root_user:
if ri in sc_truelabels_subi: root_user_ = ri
# print('component', comp_i, 'has root', root_user[comp_i])
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_toy(a_i,
sc_labels_subi,
root_user_,
sc_truelabels_subi,
[], [])
elif (self.dataset == 'humanCD34'): # | (self.dataset == '2M'):
for ri in root_user:
if PARC_labels_leiden[ri] in cluster_labels_subi: root_user_ = ri
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_HumanCD34(a_i,
sc_labels_subi,
root_user_,
sc_truelabels_subi)
elif (self.dataset == '2M'):
for ri in root_user:
if PARC_labels_leiden[ri] in cluster_labels_subi: root_user_ = ri
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_2Morgan(a_i,
sc_labels_subi,
root_user_,
sc_truelabels_subi)
elif (self.dataset == 'bcell') | (self.dataset == 'EB'):
for ri in root_user:
if PARC_labels_leiden[ri] in cluster_labels_subi: root_user_ = ri
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_bcell(a_i,
sc_labels_subi,
root_user_,
sc_truelabels_subi)
elif ((self.dataset == 'iPSC') | (self.dataset == 'mESC')):
for ri in root_user:
if PARC_labels_leiden[ri] in cluster_labels_subi: root_user_ = ri
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
# print('super node degree', self.super_node_degree_list)
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_iPSC(a_i,
sc_labels_subi,
root_user_,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
else:
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_iPSC(a_i,
sc_labels_subi,
root_user_,
sc_truelabels_subi,
[],
[])
else:
if comp_i > len(root_user) - 1:
root_generic = 0
else:
for ri in root_user:
if PARC_labels_leiden[ri] in cluster_labels_subi: root_generic = ri
#root_generic = root_user[comp_i]
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_bcell(a_i,
sc_labels_subi,
root_generic,
sc_truelabels_subi)
self.root.append(root_i)
for item in node_deg_list_i:
node_deg_list.append(item)
#print('a_i shape, true labels shape', a_i.shape, len(sc_truelabels_subi), len(sc_labels_subi))
new_root_index_found = False
for ii, llabel in enumerate(cluster_labels_subi):
if root_i == llabel:
new_root_index = ii
new_root_index_found = True
#print('new root index', new_root_index, ' original root cluster was', root_i)
if new_root_index_found == False:
print('cannot find the new root index')
new_root_index = 0
hitting_times, roundtrip_times = self.compute_hitting_time(a_i, root=new_root_index,
x_lazy=x_lazy,
alpha_teleport=alpha_teleport)
# rescale hitting times
very_high = np.mean(hitting_times) + 1.5 * np.std(hitting_times)
without_very_high_pt = [iii for iii in hitting_times if iii < very_high]
new_very_high = np.mean(without_very_high_pt) + np.std(without_very_high_pt)
# print('very high, and new very high', very_high, new_very_high)
new_hitting_times = [x if x < very_high else very_high for x in hitting_times]
hitting_times = np.asarray(new_hitting_times)
scaling_fac = 10 / max(hitting_times)
hitting_times = hitting_times * scaling_fac
s_ai, t_ai = a_i.nonzero()
edgelist_ai = list(zip(s_ai, t_ai))
edgeweights_ai = a_i.data
# print('edgelist ai', edgelist_ai)
# print('edgeweight ai', edgeweights_ai)
biased_edgeweights_ai = get_biased_weights(edgelist_ai, edgeweights_ai, hitting_times)
# biased_sparse = csr_matrix((biased_edgeweights, (row, col)))
adjacency_matrix_ai = np.zeros((a_i.shape[0], a_i.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix_ai[start, end] = biased_edgeweights_ai[i]
markov_hitting_times_ai = self.simulate_markov(adjacency_matrix_ai,
new_root_index) # +adjacency_matrix.T))
# for eee, ttt in enumerate(markov_hitting_times_ai):print('cluster ', eee, ' had markov time', ttt)
very_high = np.mean(markov_hitting_times_ai) + 1.5 * np.std(markov_hitting_times_ai) # 1.5
very_high = min(very_high, max(markov_hitting_times_ai))
without_very_high_pt = [iii for iii in markov_hitting_times_ai if iii < very_high]
new_very_high = min(np.mean(without_very_high_pt) + np.std(without_very_high_pt), very_high)
new_markov_hitting_times_ai = [x if x < very_high else very_high for x in markov_hitting_times_ai]
# for eee, ttt in enumerate(new_markov_hitting_times_ai): print('cluster ', eee, ' had markov time', ttt)
markov_hitting_times_ai = np.asarray(new_markov_hitting_times_ai)
scaling_fac = 10 / max(markov_hitting_times_ai)
markov_hitting_times_ai = markov_hitting_times_ai * scaling_fac
# for eee, ttt in enumerate(markov_hitting_times_ai):print('cluster ', eee, ' had markov time', ttt)
# print('markov hitting times', [(i, j) for i, j in enumerate(markov_hitting_times_ai)])
# print('hitting times', [(i, j) for i, j in enumerate(hitting_times)])
markov_hitting_times_ai = (markov_hitting_times_ai) # + hitting_times)*.5 #consensus
adjacency_matrix_csr_ai = sparse.csr_matrix(adjacency_matrix_ai)
(sources, targets) = adjacency_matrix_csr_ai.nonzero()
edgelist_ai = list(zip(sources, targets))
weights_ai = adjacency_matrix_csr_ai.data
bias_weights_2_ai = get_biased_weights(edgelist_ai, weights_ai, markov_hitting_times_ai, round_no=2)
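# second round of edge biasing: re-bias the cluster-graph edges using the MCMC pseudotime; adjacency_matrix2_ai
# built below is then used both to identify terminal clusters and to simulate lineage (branch) probabilities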
adjacency_matrix2_ai = np.zeros((adjacency_matrix_ai.shape[0], adjacency_matrix_ai.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix2_ai[start, end] = bias_weights_2_ai[i]
if self.super_terminal_cells == False:
#print('new_root_index', new_root_index, ' before get terminal')
terminal_clus_ai = self.get_terminal_clusters(adjacency_matrix2_ai, markov_hitting_times_ai,
new_root_index)
temp_terminal_clus_ai = []
for i in terminal_clus_ai:
if markov_hitting_times_ai[i] > np.percentile(np.asarray(markov_hitting_times_ai),
self.pseudotime_threshold_TS):
# print(i, markov_hitting_times_ai[i],np.percentile(np.asarray(markov_hitting_times_ai), self.pseudotime_threshold_TS))
terminal_clus.append(cluster_labels_subi[i])
temp_terminal_clus_ai.append(i)
terminal_clus_ai = temp_terminal_clus_ai
elif len(self.super_terminal_clusters) > 0: # round2 of PARC
print('super_terminal_clusters', self.super_terminal_clusters)
sub_terminal_clus_temp_ = []
#print('cluster_labels_subi', cluster_labels_subi)
terminal_clus_ai = []
super_terminal_clusters_i = [stc_i for stc_i in self.super_terminal_clusters if
stc_i in cluster_labels_subi]
for i in self.super_terminal_clusters:
sub_terminal_clus_temp_loc = np.where(np.asarray(self.super_cluster_labels) == i)[0]
true_majority_i = [xx for xx in np.asarray(self.true_label)[sub_terminal_clus_temp_loc]]
#print(true_majority_i[0], 'true_majority_i', 'of cluster', i)
# 0:1 for single connected structure #when using Toy as true label has T1 or T2
temp_set = set(list(np.asarray(self.labels)[sub_terminal_clus_temp_loc])) #the clusters in second iteration that make up the super clusters in first iteration
temp_set = [t_s for t_s in temp_set if t_s in cluster_labels_subi]
temp_max_pt = 0
most_likely_sub_terminal = False
count_frequency_super_in_sub = 0
# If you have disconnected components and corresponding labels to identify which cell belongs to which components, then use the Toy 'T1_M1' format
CHECK_BOOL = False
if (self.dataset == 'toy'):
if (root_user_[0:2] in true_majority_i[0]) | (root_user_[0:1] == 'M'): CHECK_BOOL = True
# Find the sub-terminal cluster in second iteration of VIA that best corresponds to the super-terminal cluster (i)from iteration 1
if (CHECK_BOOL) | (
self.dataset != 'toy'): # 0:1 for single connected structure #when using Toy as true label has T1 or T2
for j in temp_set:
loc_j_in_sub_ai = np.where(loc_compi == j)[0]
super_cluster_composition_loc = np.where(np.asarray(self.labels) == j)[0]
super_cluster_composition = self.func_mode(
list(np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]))
if (markov_hitting_times_ai[loc_j_in_sub_ai] > temp_max_pt) & (
super_cluster_composition == i):
temp_max_pt = markov_hitting_times_ai[loc_j_in_sub_ai]
most_likely_sub_terminal = j
if most_likely_sub_terminal == False:
print('no sub cluster has majority made of super-cluster ', i)
for j in temp_set:
super_cluster_composition_loc = np.where(np.asarray(self.labels) == j)[0]
count_frequency_super_in_sub_temp = list(
np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]).count(i)
count_frequency_super_in_sub_temp_ratio = count_frequency_super_in_sub_temp / len(
super_cluster_composition_loc)
if (markov_hitting_times_ai[loc_j_in_sub_ai] > np.percentile(
np.asarray(markov_hitting_times_ai), 30)) & ( # 30
count_frequency_super_in_sub_temp_ratio > count_frequency_super_in_sub):
count_frequency_super_in_sub = count_frequency_super_in_sub_temp
most_likely_sub_terminal = j
sub_terminal_clus_temp_.append(most_likely_sub_terminal)
if (markov_hitting_times_ai[loc_j_in_sub_ai] > np.percentile(
np.asarray(markov_hitting_times_ai), self.pseudotime_threshold_TS)): # 30
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
super_terminal_clus_revised.append(i)
terminal_clus.append(most_likely_sub_terminal)
terminal_clus_ai.append(
np.where(np.asarray(cluster_labels_subi) == most_likely_sub_terminal)[0][0]) # =i
#print('the sub terminal cluster that best captures the super terminal', i, 'is', most_likely_sub_terminal)
else:
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal, 'but the pseudotime is too low')
else:
print('super terminal cells', self.super_terminal_cells)
temp = [self.labels[ti] for ti in self.super_terminal_cells if
self.labels[ti] in cluster_labels_subi]
terminal_clus_ai = []
for i in temp:
terminal_clus_ai.append(np.where(np.asarray(cluster_labels_subi) == i)[0][0])
terminal_clus.append(i)
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
print('terminal clus in this component', terminal_clus_ai)
print('final terminal clus', terminal_clus)
for target_terminal in terminal_clus_ai:
prob_ai = self.simulate_branch_probability(target_terminal, terminal_clus_ai,
adjacency_matrix2_ai,
new_root_index, pt=markov_hitting_times_ai,
num_sim=500) # 50 ToDO change back to 500 = numsim
df_graph['terminal_clus' + str(cluster_labels_subi[target_terminal])] = 0.0000000
pd_columnnames_terminal.append('terminal_clus' + str(cluster_labels_subi[target_terminal]))
for k, prob_ii in enumerate(prob_ai):
df_graph.at[cluster_labels_subi[k], 'terminal_clus' + str(
cluster_labels_subi[target_terminal])] = prob_ii
bp_array = df_graph[pd_columnnames_terminal].values
bp_array[np.isnan(bp_array)] = 0.00000001
bp_array = bp_array / bp_array.sum(axis=1)[:, None]
bp_array[np.isnan(bp_array)] = 0.00000001
for ei, ii in enumerate(loc_compi):
df_graph.at[ii, 'pt'] = hitting_times[ei]
df_graph.at[ii, 'graph_node_label'] = graph_node_label[ei]
df_graph.at[ii, 'majority_truth'] = graph_node_label[ei]
df_graph.at[ii, 'markov_pt'] = markov_hitting_times_ai[ei]
locallytrimmed_g.vs["label"] = df_graph['graph_node_label'].values
hitting_times = df_graph['pt'].values
if len(super_terminal_clus_revised) > 0:
self.revised_super_terminal_clusters = super_terminal_clus_revised
else:
self.revised_super_terminal_clusters = self.super_terminal_clusters
self.hitting_times = hitting_times
self.markov_hitting_times = df_graph['markov_pt'].values # hitting_times#
self.terminal_clusters = terminal_clus
print('terminal clusters', terminal_clus)
self.node_degree_list = node_deg_list
print(colored('project onto single cell', 'red'))
#self.project_branch_probability_sc(bp_array, df_graph['pt'].values) #testing to see what the effect of not doing MCMC is
self.project_branch_probability_sc(bp_array, df_graph['markov_pt'].values)
self.dict_terminal_super_sub_pairs = dict_terminal_super_sub_pairs
hitting_times = self.markov_hitting_times
bias_weights_2_all = get_biased_weights(edgelist, edgeweights, self.markov_hitting_times, round_no=2)
row_list = []
col_list = []
for (rowi, coli) in edgelist:
row_list.append(rowi)
col_list.append(coli)
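# Assemble the cluster graph as a sparse adjacency matrix using the round-2 pseudotime-biased edge weights.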
temp_csr = csr_matrix((np.array(bias_weights_2_all), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
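# Dataset-specific visualization parameters: visual_global_pruning_std sets how aggressively edges are pruned (in standard-deviation units of edge weight) and max_outgoing caps how many outgoing edges are kept per cluster in the plotted graph.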
if (self.dataset == '2M') | (self.dataset == 'mESC'):
visual_global_pruning_std = 1
max_outgoing = 3 # 4
elif self.dataset == 'scATAC':
visual_global_pruning_std = 0.15
max_outgoing = 2 # 4
elif self.dataset == 'faced':
visual_global_pruning_std = 1
max_outgoing = 2 # 4
elif self.dataset == 'droso':
visual_global_pruning_std = 0.15 # -0.1 # 1 (2-4hrs use 0.15
max_outgoing = 2 # 3 # 4
elif self.dataset == 'pancreas':
visual_global_pruning_std = 0.15#0.15#0 # 1
max_outgoing = 3 # 3 # 4
elif self.dataset == 'cardiac':
visual_global_pruning_std = .3 # .15 # 1
max_outgoing = 2 # 3 # 4
elif self.dataset == 'cardiac_merge':
visual_global_pruning_std = .15 # .15#.15 # 1
max_outgoing = 2 # 3 # 4
elif self.dataset =='toy':
visual_global_pruning_std = .5
max_outgoing = 2
else:
visual_global_pruning_std = 0.15 # 0.15 # 0.15#1 for 2Morgan # 0.15 in other caseses #0 for human #1 for mESC
max_outgoing = 2 # 3 for 2Morgan # 2 Aug12 changing to 3 from 2
# glob_std_pruning =0 and max_out = 2 for HumanCD34 to simplify structure
edgeweights_maxout_2, edgelist_maxout_2, comp_labels_2 = pruning_clustergraph(temp_csr,
global_pruning_std=visual_global_pruning_std,
max_outgoing=max_outgoing,
preserve_disconnected=self.preserve_disconnected)
row_list = []
col_list = []
for (rowi, coli) in edgelist_maxout_2:
row_list.append(rowi)
col_list.append(coli)
temp_csr = csr_matrix((np.array(edgeweights_maxout_2), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
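# Make the pruned graph undirected: symmetrize the adjacency, then keep only the lower triangle so each edge is represented once.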
temp_csr = temp_csr.transpose().todense() + temp_csr.todense()
temp_csr = np.tril(temp_csr, -1) # elements along the main diagonal and above are set to zero
temp_csr = csr_matrix(temp_csr)
edgeweights_maxout_2 = temp_csr.data
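# Rescale the surviving edge weights into a small positive range so they can be used directly as plot line widths.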
scale_factor = max(edgeweights_maxout_2) - min(edgeweights_maxout_2)
edgeweights_maxout_2 = [((wi + .1) * 2.5 / scale_factor) + 0.1 for wi in edgeweights_maxout_2]
sources, targets = temp_csr.nonzero()
edgelist_maxout_2 = list(zip(sources.tolist(), targets.tolist()))
self.edgelist_maxout = edgelist_maxout_2
# print('edgelist_maxout_2', edgelist_maxout_2)
self.edgeweights_maxout = edgeweights_maxout_2
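# Clip pseudotime outliers (high values to the 95th percentile, low values to the 5th percentile of the low tail) and rescale to [0, 1000] for node colouring.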
remove_outliers = hitting_times
threshold = np.percentile(remove_outliers, 95) # np.mean(remove_outliers) + 1* np.std(remove_outliers)
th_hitting_times = [x if x < threshold else threshold for x in hitting_times]
remove_outliers_low = hitting_times[hitting_times < (np.mean(hitting_times) - 0.3 * np.std(hitting_times))]
# threshold_low = np.mean(remove_outliers_low) - 0.3 * np.std(remove_outliers_low)
if remove_outliers_low.size == 0:
threshold_low = 0
else:
threshold_low = np.percentile(remove_outliers_low, 5)
th_hitting_times = [x if x > threshold_low else threshold_low for x in th_hitting_times]
scaled_hitting_times = (th_hitting_times - np.min(th_hitting_times))
scaled_hitting_times = scaled_hitting_times * (1000 / np.max(scaled_hitting_times))
self.scaled_hitting_times = scaled_hitting_times
# self.single_cell_pt = self.project_hittingtimes_sc(self.hitting_times)
# self.single_cell_pt_stationary_bias = self.project_hittingtimes_sc(self.stationary_hitting_times.flatten())
# self.dijkstra_hitting_times = self.path_length_onbias(edgelist, biased_edgeweights)
# print('dijkstra hitting times', [(i,j) for i,j in enumerate(self.dijkstra_hitting_times)])
# self.single_cell_pt_dijkstra_bias = self.project_hittingtimes_sc(self.dijkstra_hitting_times)
scaled_hitting_times = scaled_hitting_times.astype(int)
pal = ig.drawing.colors.AdvancedGradientPalette(['yellow', 'green', 'blue'], n=1001)
all_colors = []
for i in scaled_hitting_times:
all_colors.append(pal.get(int(i))[0:3])
locallytrimmed_g.vs['hitting_times'] = scaled_hitting_times
locallytrimmed_g.vs['color'] = [pal.get(i)[0:3] for i in scaled_hitting_times]
self.group_color = [colors.to_hex(v) for v in locallytrimmed_g.vs['color']] # based on ygb scale
viridis_cmap = cm.get_cmap('viridis_r')
self.group_color_cmap = [colors.to_hex(v) for v in
viridis_cmap(scaled_hitting_times / 1000)] # based on ygb scale
self.graph_node_label = df_graph['graph_node_label'].values
self.edgeweight = [e['weight'] * 1 for e in locallytrimmed_g.es]
# print('self edge weight', len(self.edgeweight), self.edgeweight)
# print('self edge list', len(self.edgelist_unique), self.edgelist_unique)
self.graph_node_pos = layout.coords
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True,figsize=[20, 10])
self.draw_piechart_graph(ax, ax1)
plt.show()
# plt.scatter(self.embedding[:, 0], self.embedding[:, 1], c=self.single_cell_pt_markov, cmap='viridis', s=4, alpha=0.5)
# plt.scatter(self.embedding[root_user, 0], self.embedding[root_user, 1], c='orange', s=15)
# plt.title('root cell' + str(root_user))
# plt.show()
import statistics
from statistics import mode
for tsi in self.terminal_clusters:
loc_i = np.where(np.asarray(self.labels) == tsi)[0]
val_pt = [self.single_cell_pt_markov[i] for i in loc_i]
if self.dataset == '2M':
major_traj = [self.df_annot.loc[i, ['Main_trajectory']].values[0] for i in loc_i]
major_cell_type = [self.df_annot.loc[i, ['Main_cell_type']].values[0] for i in loc_i]
print(tsi, 'has major traj and cell type', mode(major_traj), mode(major_cell_type))
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(self.data[loc_i], axis=0)
labelsq, distances = self.knn_struct.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
if self.embedding is not None:
plt.scatter(self.embedding[:, 0], self.embedding[:, 1], c=self.single_cell_pt_markov,
cmap='viridis', s=3, alpha=0.5)
plt.scatter(self.embedding[root_user, 0], self.embedding[root_user, 1], c='orange', s=20)
plt.title('root:' + str(root_user) + 'knn' + str(self.knn) + 'Ncomp' + str(self.ncomp))
for i in tsi_list:
# print(i, ' has traj and cell type', self.df_annot.loc[i, ['Main_trajectory', 'Main_cell_type']])
plt.text(self.embedding[i, 0], self.embedding[i, 1], str(i))
plt.scatter(self.embedding[i, 0], self.embedding[i, 1], c='red', s=10)
plt.show()
return
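# draw_piechart_graph: the left axis (ax) draws each cluster as a pie chart of its reference-label composition; the right axis (ax1) colours clusters by pseudotime, or by gene expression when type_pt='gene'.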
def draw_piechart_graph(self, ax, ax1, type_pt='pt', gene_exp='', title=''):
# ax1 is the pseudotime
arrow_head_w = 0.4#0.4
edgeweight_scale = 3#1.5
node_pos = self.graph_node_pos
edgelist = list(self.edgelist_maxout)
edgeweight = self.edgeweights_maxout
node_pos = np.asarray(node_pos)
graph_node_label = self.graph_node_label
if type_pt == 'pt':
pt = self.scaled_hitting_times # these are the final MCMC refined pt then slightly scaled
title_ax1 = "pseudotime"
# if type_pt == 'biased_stationary': pt = self.biased_hitting_times_stationary
# if type_pt == 'markov': pt = self.markov_hitting_times
if type_pt == 'gene':
pt = gene_exp
title_ax1 = title
import matplotlib.lines as lines
n_groups = len(set(self.labels)) # node_pos.shape[0]
n_truegroups = len(set(self.true_label))
group_pop = np.zeros([n_groups, 1])
group_frac = pd.DataFrame(np.zeros([n_groups, n_truegroups]), columns=list(set(self.true_label)))
for group_i in set(self.labels):
loc_i = np.where(self.labels == group_i)[0]
group_pop[group_i] = len(loc_i) # np.sum(loc_i) / 1000 + 1
true_label_in_group_i = list(np.asarray(self.true_label)[loc_i])
for ii in set(true_label_in_group_i):
group_frac[ii][group_i] = true_label_in_group_i.count(ii)
# group_frac = group_frac.div(group_frac.sum(axis=1), axis=0)
# print('group frac', group_frac)
line_true = np.linspace(0, 1, n_truegroups)
color_true_list = [plt.cm.jet(color) for color in line_true]
sct = ax.scatter(
node_pos[:, 0], node_pos[:, 1],
c='white', edgecolors='face', s=group_pop, cmap='jet')
# print('draw triangle edgelist', len(edgelist), edgelist)
bboxes = getbb(sct, ax)
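# Draw each edge as a grey line, with an arrowhead pointing from the lower-pseudotime node toward the higher-pseudotime node.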
for e_i, (start, end) in enumerate(edgelist):
if pt[start] > pt[end]:
temp = start
start = end
end = temp
ax.add_line(lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
color='grey', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.2))
z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
if (node_pos[start, 0] < node_pos[end, 0]):
direction_arrow = 1
else:
direction_arrow = -1
maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
xp = np.linspace(minx, maxx, 500)
p = np.poly1d(z)
smooth = p(xp)
step = 1
if direction_arrow == 1:
ax.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250], shape='full',
lw=0,
length_includes_head=True, head_width=arrow_head_w,
color='grey')
# ax.plot(xp, smooth, linewidth=edgeweight[e_i], c='pink')
else:
ax.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
smooth[250 - step] - smooth[250], shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w, color='grey')
trans = ax.transData.transform
bbox = ax.get_position().get_points()
ax_x_min = bbox[0, 0]
ax_x_max = bbox[1, 0]
ax_y_min = bbox[0, 1]
ax_y_max = bbox[1, 1]
ax_len_x = ax_x_max - ax_x_min
ax_len_y = ax_y_max - ax_y_min
trans2 = ax.transAxes.inverted().transform
pie_axs = []
pie_size_ar = ((group_pop - np.min(group_pop)) / (np.max(group_pop) - np.min(group_pop)) + 0.5) / 10
# print('pie_size_ar', pie_size_ar)
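# Overlay a pie-chart inset at each node position, sized by the min-max scaled cluster population and coloured by the fraction of each reference label within that cluster.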
for node_i in range(n_groups):
pie_size = pie_size_ar[node_i][0]
x1, y1 = trans(node_pos[node_i]) # data coordinates
xa, ya = trans2((x1, y1)) # axis coordinates
xa = ax_x_min + (xa - pie_size / 2) * ax_len_x
ya = ax_y_min + (ya - pie_size / 2) * ax_len_y
# clip, the fruchterman layout sometimes places below figure
if ya < 0: ya = 0
if xa < 0: xa = 0
rect = [xa, ya, pie_size * ax_len_x, pie_size * ax_len_y]
frac = np.asarray([ff for ff in group_frac.iloc[node_i].values])
pie_axs.append(plt.axes(rect, frameon=False))
pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
pie_axs[node_i].set_xticks([])
pie_axs[node_i].set_yticks([])
pie_axs[node_i].set_aspect('equal')
pie_axs[node_i].text(0.5, 0.5, graph_node_label[node_i])
patches, texts = pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
labels = list(set(self.true_label))
plt.legend(patches, labels, loc=(-5, -5), fontsize=6)
if self.is_coarse == True: # self.too_big_factor >= 0.1:
is_sub = ' super clusters'
else:
is_sub = ' sub clusters'
ti = 'Reference Group Membership. K=' + str(self.knn) + '. ncomp = ' + str(self.ncomp) + is_sub
ax.set_title(ti)
title_list = [title_ax1] # , "PT on undirected original graph"]
for i, ax_i in enumerate([ax1]):
if type_pt == 'pt':
pt = self.markov_hitting_times
else:
pt = gene_exp
for e_i, (start, end) in enumerate(edgelist):
if pt[start] > pt[end]:
temp = start
start = end
end = temp
ax_i.add_line(
lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
color='black', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.5))
z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
if (node_pos[start, 0] < node_pos[end, 0]):
direction_arrow = 1
else:
direction_arrow = -1
maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
xp = np.linspace(minx, maxx, 500)
p = np.poly1d(z)
smooth = p(xp)
step = 1
if direction_arrow == 1:
ax_i.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250],
shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w,
color='grey')
else:
ax_i.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
smooth[250 - step] - smooth[250], shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w, color='grey')
c_edge = []
l_width = []
for ei, pti in enumerate(pt):
if ei in self.terminal_clusters:
c_edge.append('red')
l_width.append(1.5)
else:
c_edge.append('gray')
l_width.append(0.0)
gp_scaling = 1000/max(group_pop)#500 / max(group_pop)
# print(gp_scaling, 'gp_scaling')
group_pop_scale = group_pop * gp_scaling
cmap = 'viridis_r'
if type_pt == 'gene': cmap = 'coolwarm' # cmap = 'viridis'
ax_i.scatter(node_pos[:, 0], node_pos[:, 1], s=group_pop_scale, c=pt, cmap=cmap, edgecolors=c_edge,
alpha=1, zorder=3, linewidth=l_width)
# for ii in range(node_pos.shape[0]):
# ax_i.text(node_pos[ii, 0] + 0.5, node_pos[ii, 1] + 0.5, 'c' + str(ii), color='black', zorder=4)
title_pt = title_list[i]
ax_i.set_title(title_pt)
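# accuracy: score the clustering as a one-vs-all classifier for the label `onevsall`; clusters whose majority true label equals onevsall count as positive predictions. Returns [error rate, f1, tnr, fnr, tpr, fpr, precision, recall, num clusters, target population], the binarized per-cell predictions, the per-cluster majority-truth labels and the number of clusters assigned to the target.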
def accuracy(self, onevsall=1):
true_labels = self.true_label
Index_dict = {}
PARC_labels = self.labels
N = len(PARC_labels)
n_cancer = list(true_labels).count(onevsall)
n_pbmc = N - n_cancer
for k in range(N):
Index_dict.setdefault(PARC_labels[k], []).append(true_labels[k])
num_groups = len(Index_dict)
sorted_keys = list(sorted(Index_dict.keys()))
error_count = []
pbmc_labels = []
thp1_labels = []
fp, fn, tp, tn, precision, recall, f1_score = 0, 0, 0, 0, 0, 0, 0
for kk in sorted_keys:
vals = [t for t in Index_dict[kk]]
majority_val = self.func_mode(vals)
#if majority_val == onevsall: print('cluster', kk, ' has majority', onevsall, 'with population', len(vals))
if kk == -1:
len_unknown = len(vals)
# print('len unknown', len_unknown)
if (majority_val == onevsall) and (kk != -1):
thp1_labels.append(kk)
fp = fp + len([e for e in vals if e != onevsall])
tp = tp + len([e for e in vals if e == onevsall])
list_error = [e for e in vals if e != majority_val]
e_count = len(list_error)
error_count.append(e_count)
elif (majority_val != onevsall) and (kk != -1):
pbmc_labels.append(kk)
tn = tn + len([e for e in vals if e != onevsall])
fn = fn + len([e for e in vals if e == onevsall])
error_count.append(len([e for e in vals if e != majority_val]))
predict_class_array = np.array(PARC_labels)
PARC_labels_array = np.array(PARC_labels)
number_clusters_for_target = len(thp1_labels)
for cancer_class in thp1_labels:
predict_class_array[PARC_labels_array == cancer_class] = 1
for benign_class in pbmc_labels:
predict_class_array[PARC_labels_array == benign_class] = 0
predict_class_array.reshape((predict_class_array.shape[0], -1))
error_rate = sum(error_count) / N
n_target = tp + fn
tnr = tn / n_pbmc
fnr = fn / n_cancer
tpr = tp / n_cancer
fpr = fp / n_pbmc
if tp != 0 or fn != 0: recall = tp / (tp + fn) # ability to find all positives
if tp != 0 or fp != 0: precision = tp / (tp + fp) # ability to not misclassify negatives as positives
if precision != 0 or recall != 0:
f1_score = precision * recall * 2 / (precision + recall)
majority_truth_labels = np.empty((len(true_labels), 1), dtype=object)
for cluster_i in set(PARC_labels):
cluster_i_loc = np.where(np.asarray(PARC_labels) == cluster_i)[0]
true_labels = np.asarray(true_labels)
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = majority_truth
majority_truth_labels = list(majority_truth_labels.flatten())
accuracy_val = [error_rate, f1_score, tnr, fnr, tpr, fpr, precision,
recall, num_groups, n_target]
return accuracy_val, predict_class_array, majority_truth_labels, number_clusters_for_target
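# run_VIA: build the KNN search structure, run the clustering/trajectory pipeline (run_subPARC), then compute one-vs-all F1 scores for every true label and store the per-label statistics in self.stats_df.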
def run_VIA(self):
print('input data has shape', self.data.shape[0], '(samples) x', self.data.shape[1], '(features)')
self.ncomp = self.data.shape[1]
pop_list = []
for item in set(list(self.true_label)):
pop_list.append([item, list(self.true_label).count(item)])
# print("population composition", pop_list)
if self.true_label is None:
self.true_label = [1] * self.data.shape[0]
list_roc = []
time_start_total = time.time()
time_start_knn = time.time()
self.knn_struct = self.make_knn_struct()
time_end_knn_struct = time.time() - time_start_knn
# Query dataset, k - number of closest elements (returns 2 numpy arrays)
self.run_subPARC()
run_time = time.time() - time_start_total
print('time elapsed {:.1f} seconds'.format(run_time))
targets = list(set(self.true_label))
N = len(list(self.true_label))
self.f1_accumulated = 0
self.f1_mean = 0
self.stats_df = pd.DataFrame({'jac_std_global': [self.jac_std_global], 'dist_std_local': [self.dist_std_local],
'runtime(s)': [run_time]})
self.majority_truth_labels = []
if len(targets) > 1:
f1_accumulated = 0
f1_acc_noweighting = 0
for onevsall_val in targets:
# print('target is', onevsall_val)
vals_roc, predict_class_array, majority_truth_labels, numclusters_targetval = self.accuracy(
onevsall=onevsall_val)
f1_current = vals_roc[1]
#print('target', onevsall_val, 'has f1-score of %.2f' % (f1_current * 100))
f1_accumulated = f1_accumulated + f1_current * (list(self.true_label).count(onevsall_val)) / N
f1_acc_noweighting = f1_acc_noweighting + f1_current
list_roc.append(
[self.jac_std_global, self.dist_std_local, onevsall_val] + vals_roc + [numclusters_targetval] + [
run_time])
f1_mean = f1_acc_noweighting / len(targets)
#print("f1-score (unweighted) mean %.2f" % (f1_mean * 100), '%')
# print('f1-score weighted (by population) %.2f' % (f1_accumulated * 100), '%')
df_accuracy = pd.DataFrame(list_roc,
columns=['jac_std_global', 'dist_std_local', 'onevsall-target', 'error rate',
'f1-score', 'tnr', 'fnr',
'tpr', 'fpr', 'precision', 'recall', 'num_groups',
'population of target', 'num clusters', 'clustering runtime'])
self.f1_accumulated = f1_accumulated
self.f1_mean = f1_mean
self.stats_df = df_accuracy
self.majority_truth_labels = majority_truth_labels
return
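# cellrank_Human: comparison pipeline on the human CD34 bone-marrow data using scVelo (filter/normalize, PCA, KNN graph, Louvain clustering, RNA velocity and a velocity embedding stream plot).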
def cellrank_Human(ncomps=80, knn=30, v0_random_seed=7):
import scvelo as scv
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
ad = scv.read_loom('/home/shobi/Downloads/Human Hematopoietic Profiling homo_sapiens 2019-11-08 16.12.loom')
print(ad)
# ad = sc.read('/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# ad.obs['nover_label'] = nover_labels
print('start cellrank pipeline', time.ctime())
# scv.utils.show_proportions(ad)
scv.pl.proportions(ad)
scv.pp.filter_and_normalize(ad, min_shared_counts=20, n_top_genes=2000)
sc.tl.pca(ad, n_comps=ncomps)
n_pcs = ncomps
print('npcs', n_pcs, 'knn', knn)
sc.pp.neighbors(ad, n_pcs=n_pcs, n_neighbors=knn)
sc.tl.louvain(ad, key_added='clusters', resolution=1)
scv.pp.moments(ad, n_pcs=n_pcs, n_neighbors=knn)
scv.tl.velocity(ad)
scv.tl.velocity_graph(ad)
scv.pl.velocity_embedding_stream(ad, basis='umap', color='nover_label')
def main_Human(ncomps=80, knn=30, v0_random_seed=7, run_palantir_func=False):
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC (cDC)', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
# NOTE: Myeloid DCs are now called Conventional Dendritic Cells (cDCs)
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne =
|
pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
|
pandas.DataFrame
|
# Fundamental libraries
import os
import re
import sys
import time
import glob
import random
import datetime
import warnings
import itertools
import numpy as np
import pandas as pd
import pickle as cp
import seaborn as sns
import multiprocessing
from scipy import stats
from pathlib import Path
from ast import literal_eval
import matplotlib.pyplot as plt
from collections import Counter
from argparse import ArgumentParser
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
warnings.filterwarnings(action="ignore")
# PyTorch, PyTorch.Text, and Lightning-PyTorch methods
import torch
from torch import nn, optim, Tensor
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchtext.vocab import Vocab
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
# SciKit-Learn methods
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OneHotEncoder, StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_class_weight
# TQDM for progress tracking
from tqdm import tqdm
# Custom methods
from classes.datasets import CONCISE_PREDICTOR_SET
from models.CPM import CPM_deep
# Train CPM_deep
def train_CPM_deep(TRAINING_SET,
VAL_SET,
TESTING_SET,
TUNE_IDX,
REPEAT,
FOLD,
OUTPUT_DIR,
BATCH_SIZE,
LEARNING_RATE,
LAYERS,
NEURONS,
DROPOUT,
ES_PATIENCE,
EPOCHS,
CLASS_WEIGHTS,
OUTPUT_ACTIVATION):
"""
Args:
TRAINING_SET (pd.DataFrame)
VAL_SET (pd.DataFrame)
TESTING_SET (pd.DataFrame)
TUNE_IDX (str)
REPEAT (int)
FOLD (int)
OUTPUT_DIR (str): directory to save model outputs
BATCH_SIZE (int): size of minibatches during training
LEARNING_RATE (float): learning rate for the Adam optimizer
LAYERS (int): number of hidden layers in the feed-forward neural network
NEURONS (list of length layers): the number of neurons in each layer
DROPOUT (float): the proportion of each dense layer dropped out during training
ES_PATIENCE (int): patience during early stopping
EPOCHS (int): maximum epochs during training
CLASS_WEIGHTS (boolean): identifies whether loss should be weighted against class frequency
OUTPUT_ACTIVATION (string): 'softmax' for DeepMN or 'sigmoid' for DeepOR
"""
# Create a directory within current repeat/fold combination to store outputs of current tuning configuration
tune_model_dir = os.path.join(OUTPUT_DIR,'tune_'+TUNE_IDX)
os.makedirs(tune_model_dir,exist_ok = True)
# Create PyTorch Dataset objects
train_Dataset = CONCISE_PREDICTOR_SET(TRAINING_SET,OUTPUT_ACTIVATION)
val_Dataset = CONCISE_PREDICTOR_SET(VAL_SET,OUTPUT_ACTIVATION)
test_Dataset = CONCISE_PREDICTOR_SET(TESTING_SET,OUTPUT_ACTIVATION)
# Create PyTorch DataLoader objects
curr_train_DL = DataLoader(train_Dataset,
batch_size=int(BATCH_SIZE),
shuffle=True)
curr_val_DL = DataLoader(val_Dataset,
batch_size=len(val_Dataset),
shuffle=False)
curr_test_DL = DataLoader(test_Dataset,
batch_size=len(test_Dataset),
shuffle=False)
# Initialize current model class based on hyperparameter selections
model = CPM_deep(train_Dataset.X.shape[1],
LAYERS,
NEURONS,
DROPOUT,
OUTPUT_ACTIVATION,
LEARNING_RATE,
CLASS_WEIGHTS,
train_Dataset.y)
early_stop_callback = EarlyStopping(
monitor='val_AUROC',
patience=ES_PATIENCE,
mode='max'
)
checkpoint_callback = ModelCheckpoint(
monitor='val_AUROC',
dirpath=tune_model_dir,
filename='{epoch:02d}-{val_AUROC:.2f}',
save_top_k=1,
mode='max'
)
csv_logger = pl.loggers.CSVLogger(save_dir=OUTPUT_DIR,name='tune_'+TUNE_IDX)
trainer = pl.Trainer(logger = csv_logger,
max_epochs = EPOCHS,
enable_progress_bar = False,
enable_model_summary = False,
callbacks=[early_stop_callback,checkpoint_callback])
trainer.fit(model,curr_train_DL,curr_val_DL)
best_model = CPM_deep.load_from_checkpoint(checkpoint_callback.best_model_path)
best_model.eval()
# Save validation set probabilities
for i, (x,y) in enumerate(curr_val_DL):
yhat = best_model(x)
val_true_y = y.cpu().numpy()
if OUTPUT_ACTIVATION == 'softmax':
curr_val_probs = F.softmax(yhat.detach(), dim=1).cpu().numpy()
curr_val_preds = pd.DataFrame(curr_val_probs,columns=['Pr(GOSE=1)','Pr(GOSE=2/3)','Pr(GOSE=4)','Pr(GOSE=5)','Pr(GOSE=6)','Pr(GOSE=7)','Pr(GOSE=8)'])
curr_val_preds['TrueLabel'] = val_true_y
elif OUTPUT_ACTIVATION == 'sigmoid':
curr_val_probs = F.sigmoid(yhat.detach()).cpu().numpy()
curr_val_probs = pd.DataFrame(curr_val_probs,columns=['Pr(GOSE>1)','Pr(GOSE>3)','Pr(GOSE>4)','Pr(GOSE>5)','Pr(GOSE>6)','Pr(GOSE>7)'])
curr_val_labels = pd.DataFrame(val_true_y,columns=['GOSE>1','GOSE>3','GOSE>4','GOSE>5','GOSE>6','GOSE>7'])
curr_val_preds = pd.concat([curr_val_probs,curr_val_labels],axis = 1)
else:
raise ValueError("Invalid output layer type. Must be 'softmax' or 'sigmoid'")
curr_val_preds.insert(loc=0, column='GUPI', value=VAL_SET.GUPI.values)
curr_val_preds['TUNE_IDX'] = TUNE_IDX
curr_val_preds.to_csv(os.path.join(tune_model_dir,'val_predictions.csv'),index=False)
best_model.eval()
# Save testing set probabilities
for i, (x,y) in enumerate(curr_test_DL):
yhat = best_model(x)
test_true_y = y.cpu().numpy()
if OUTPUT_ACTIVATION == 'softmax':
curr_test_probs = F.softmax(yhat.detach(), dim=1).cpu().numpy()
curr_test_preds = pd.DataFrame(curr_test_probs,columns=['Pr(GOSE=1)','Pr(GOSE=2/3)','Pr(GOSE=4)','Pr(GOSE=5)','Pr(GOSE=6)','Pr(GOSE=7)','Pr(GOSE=8)'])
curr_test_preds['TrueLabel'] = test_true_y
elif OUTPUT_ACTIVATION == 'sigmoid':
curr_test_probs = F.sigmoid(yhat.detach()).cpu().numpy()
curr_test_probs = pd.DataFrame(curr_test_probs,columns=['Pr(GOSE>1)','Pr(GOSE>3)','Pr(GOSE>4)','Pr(GOSE>5)','Pr(GOSE>6)','Pr(GOSE>7)'])
curr_test_labels = pd.DataFrame(test_true_y,columns=['GOSE>1','GOSE>3','GOSE>4','GOSE>5','GOSE>6','GOSE>7'])
curr_test_preds = pd.concat([curr_test_probs,curr_test_labels],axis = 1)
else:
raise ValueError("Invalid output layer type. Must be 'softmax' or 'sigmoid'")
curr_test_preds.insert(loc=0, column='GUPI', value=TESTING_SET.GUPI.values)
curr_test_preds['TUNE_IDX'] = TUNE_IDX
curr_test_preds.to_csv(os.path.join(tune_model_dir,'test_predictions.csv'),index=False)
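# Illustrative call (kept as a comment, not executed): the values below are hypothetical placeholders
# showing how train_CPM_deep is expected to be invoked for one repeat/fold with a sigmoid (DeepOR) configuration:
#   train_CPM_deep(TRAINING_SET=train_df, VAL_SET=val_df, TESTING_SET=test_df,
#                  TUNE_IDX='0001', REPEAT=1, FOLD=1, OUTPUT_DIR='outputs/repeat01/fold1',
#                  BATCH_SIZE=32, LEARNING_RATE=1e-3, LAYERS=2, NEURONS=[128, 64], DROPOUT=0.2,
#                  ES_PATIENCE=10, EPOCHS=100, CLASS_WEIGHTS=True, OUTPUT_ACTIVATION='sigmoid')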
# Functions to collect validation metrics from files in parallel
def collect_val_metrics(
csv_file_list,
n_cores,
progress_bar=True,
progress_bar_desc=''):
# Establish sizes of files for each core
sizes = [len(csv_file_list) // n_cores for _ in range(n_cores)]
sizes[:(len(csv_file_list) - sum(sizes))] = [val+1 for val in sizes[:(len(csv_file_list) - sum(sizes))]]
end_indices = np.cumsum(sizes)
start_indices = np.insert(end_indices[:-1],0,0)
# Build arguments for metric sub-functions
arg_iterable = [(
csv_file_list[start_indices[idx]:end_indices[idx]],
progress_bar,
progress_bar_desc)
for idx in range(len(start_indices))]
# Run metric sub-function in parallel
with multiprocessing.Pool(n_cores) as pool:
result = pool.starmap(_val_metric_par, arg_iterable)
return
|
pd.concat(result, ignore_index=True)
|
pandas.concat
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
class ReportBuilder:
def __init__(self, data=None, file_path=None):
self.categorical_columns = []
self.numerical_columns = []
if data is not None:
self.data = data
self.set_categorical_columns()
self.set_numerical_columns()
if file_path:
self.read_csv(file_path)
def read_csv(self, file_path=None):
self.data =
|
pd.read_csv(file_path, encoding='iso-8859-1')
|
pandas.read_csv
|
from functools import partial
import json
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from solarforecastarbiter.io import utils
# data for test Dataframe
TEST_DICT = {'value': [2.0, 43.9, 338.0, -199.7, 0.32],
'quality_flag': [1, 1, 9, 5, 2]}
DF_INDEX = pd.date_range(start=pd.Timestamp('2019-01-24T00:00'),
freq='1min',
periods=5,
tz='UTC', name='timestamp')
DF_INDEX.freq = None
TEST_DATA = pd.DataFrame(TEST_DICT, index=DF_INDEX)
EMPTY_SERIES = pd.Series(dtype=float)
EMPTY_TIMESERIES = pd.Series([], name='value', index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
EMPTY_DATAFRAME = pd.DataFrame(dtype=float)
EMPTY_TIME_DATAFRAME = pd.DataFrame([], index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
TEST_DATAFRAME = pd.DataFrame({
'25.0': [0.0, 1, 2, 3, 4, 5],
'50.0': [1.0, 2, 3, 4, 5, 6],
'75.0': [2.0, 3, 4, 5, 6, 7]},
index=pd.date_range(start='20190101T0600',
end='20190101T1100',
freq='1h',
tz='America/Denver',
name='timestamp')).tz_convert('UTC')
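# Probabilistic-forecast fixture: three percentile columns (25.0, 50.0, 75.0) on an hourly index built in America/Denver time and converted to UTC.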
@pytest.mark.parametrize('dump_quality,default_flag,flag_value', [
(False, None, 1),
(True, 2, 2)
])
def test_obs_df_to_json(dump_quality, default_flag, flag_value):
td = TEST_DATA.copy()
if dump_quality:
del td['quality_flag']
converted = utils.observation_df_to_json_payload(td, default_flag)
converted_dict = json.loads(converted)
assert 'values' in converted_dict
values = converted_dict['values']
assert len(values) == 5
assert values[0]['timestamp'] == '2019-01-24T00:00:00Z'
assert values[0]['quality_flag'] == flag_value
assert isinstance(values[0]['value'], float)
def test_obs_df_to_json_no_quality():
td = TEST_DATA.copy()
del td['quality_flag']
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_obs_df_to_json_no_values():
td = TEST_DATA.copy().rename(columns={'value': 'val1'})
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_forecast_series_to_json():
series = pd.Series([0, 1, 2, 3, 4], index=pd.date_range(
start='2019-01-01T12:00Z', freq='5min', periods=5))
expected = [{'value': 0.0, 'timestamp': '2019-01-01T12:00:00Z'},
{'value': 1.0, 'timestamp': '2019-01-01T12:05:00Z'},
{'value': 2.0, 'timestamp': '2019-01-01T12:10:00Z'},
{'value': 3.0, 'timestamp': '2019-01-01T12:15:00Z'},
{'value': 4.0, 'timestamp': '2019-01-01T12:20:00Z'}]
json_out = utils.forecast_object_to_json(series)
assert json.loads(json_out)['values'] == expected
def test_json_payload_to_observation_df(observation_values,
observation_values_text):
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_json_payload_to_forecast_series(forecast_values,
forecast_values_text):
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
def test_empty_payload_to_observation_df():
out = utils.json_payload_to_observation_df({'values': []})
assert set(out.columns) == {'value', 'quality_flag'}
assert isinstance(out.index, pd.DatetimeIndex)
def test_empty_payload_to_forecast_series():
out = utils.json_payload_to_forecast_series({'values': []})
assert isinstance(out.index, pd.DatetimeIndex)
def test_null_json_payload_to_observation_df():
observation_values_text = b"""
{
"_links": {
"metadata": ""
},
"observation_id": "OBSID",
"values": [
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"quality_flag": 1,
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
observation_values = pd.DataFrame({
'value': pd.Series([None, None], index=ind, dtype=float),
'quality_flag': pd.Series([1, 1], index=ind)
})
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_null_json_payload_to_forecast_series():
forecast_values_text = b"""
{
"_links": {
"metadata": ""
},
"forecast_id": "OBSID",
"values": [
{
"timestamp": "2019-01-01T12:00:00-0700",
"value": null
},
{
"timestamp": "2019-01-01T12:05:00-0700",
"value": null
}
]
}"""
ind = pd.DatetimeIndex([
pd.Timestamp("2019-01-01T19:00:00Z"),
pd.Timestamp("2019-01-01T19:05:00Z")
], name='timestamp')
forecast_values = pd.Series([None, None], index=ind, dtype=float,
name='value')
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
pdt.assert_series_equal(out, forecast_values)
@pytest.mark.parametrize('label,exp,start,end', [
('instant', TEST_DATA, None, None),
(None, TEST_DATA, None, None),
('ending', TEST_DATA.iloc[1:], None, None),
('beginning', TEST_DATA.iloc[:-1], None, None),
pytest.param('er', TEST_DATA, None, None,
marks=pytest.mark.xfail(raises=ValueError)),
# start/end outside data
('ending', TEST_DATA,
|
pd.Timestamp('20190123T2300Z')
|
pandas.Timestamp
|
# -*- coding: utf-8 -*-
import unittest
import pandas
from pipesnake.pipe import SeriesPipe
from pipesnake.transformers.imputer import KnnImputer
from pipesnake.transformers.imputer import ReplaceImputer
from pipesnake.transformers.selector import ColumnSelector
class TestImputer(unittest.TestCase):
def test_replace_imputer(self):
from _data import x_nan
from _data import y_nan
pipe = SeriesPipe(transformers=[
ColumnSelector(x_cols=['x_1', 'x_2', 'x_3', 'x_4'], y_cols=['y_1', 'y_2']),
ReplaceImputer(x_cols='all', y_cols='all'),
])
x_new, y_new = pipe.fit_transform(x_nan, y_nan)
self.assertEqual(pandas.isnull(x_new).any().any(), False, 'NaN values has been found in x_new')
self.assertEqual(pandas.isnull(y_new).any().any(), False, 'NaN values has been found in y_new')
del x_nan, y_nan, x_new, y_new
def test_knn_imputer(self):
from _data import x_nan
from _data import y_nan
imputer = KnnImputer(x_cols='all', y_cols='all')
x_new, y_new = imputer.fit_transform(x_nan, y_nan)
self.assertEqual(
|
pandas.isnull(x_new)
|
pandas.isnull
|
from bs4 import BeautifulSoup
import pandas as pd
import os
folderPath = input(
'Please paste here the absolute path to the folder, not to the HTML file as you did before, \n and do not forget to replace \\ with / in the path \n > ')
for root, dirs, files in os.walk(folderPath, topdown=True):
for fileName in files:
if fileName.endswith(".html"):
soup = BeautifulSoup(
open(os.path.join(root, fileName)).read(), 'lxml')
one_list = []
sec_list = []
third_list = []
fourth_list = []
fifth_list = []
sixth_list = []
sev_list = []
eight_list = []
nine_list = []
ten_list = []
elev_list = []
twe_list = []
thir_list = []
fourteen_list = []
fif_list = []
six_list = []
seven_list = []
eighteen_list = []
ninteen_list = []
print('Scraping Required Information Please Wait...')
for a in soup.select('.inActiveLink .grayBorder:nth-child(1)'):
one = a.text
one_list.append(one)
for row in soup.select('.inActiveLink .grayBorder:nth-child(4)'):
sec = row.text
sec_list.append(sec)
for b in soup.select('.inActiveLink .grayBorder:nth-child(5)'):
third = b.text
third_list.append(third)
for c in soup.select('.inActiveLink .grayBorder:nth-child(6)'):
four = c.text
fourth_list.append(four)
for d in soup.select('.inActiveLink .grayBorder:nth-child(7)'):
five = d.text
fifth_list.append(five)
for e in soup.select('.inActiveLink .grayBorder:nth-child(8)'):
sixth = e.text
sixth_list.append(sixth)
for f in soup.select('.inActiveLink .grayBorder:nth-child(9)'):
sev = f.text
sev_list.append(sev)
for g in soup.select('.inActiveLink .grayBorder:nth-child(10)'):
ei = g.text
eight_list.append(ei)
for h in soup.select('.inActiveLink .grayBorder:nth-child(11)'):
ni = h.text
nine_list.append(ni)
for i in soup.select('.inActiveLink .grayBorder:nth-child(12)'):
te = i.text
ten_list.append(te)
for j in soup.select('.inActiveLink .grayBorder:nth-child(13)'):
el = j.text
elev_list.append(el)
for k in soup.select('.inActiveLink .grayBorder:nth-child(14)'):
twe = k.text
twe_list.append(twe)
for l in soup.select('.inActiveLink .grayBorder:nth-child(15)'):
th = l.text
thir_list.append(th)
for m in soup.select('.inActiveLink .grayBorder:nth-child(16)'):
fo = m.text
fourteen_list.append(fo)
for n in soup.select('.inActiveLink .grayBorder:nth-child(17)'):
fi = n.text
fif_list.append(fi)
for o in soup.select('.inActiveLink .grayBorder:nth-child(18)'):
sixteen = o.text
six_list.append(sixteen)
for p in soup.select('.inActiveLink .grayBorder:nth-child(19)'):
seven = p.text
seven_list.append(seven)
for q in soup.select('.inActiveLink .grayBorder:nth-child(20)'):
eighteen = q.text
eighteen_list.append(eighteen)
for r in soup.select('.inActiveLink .grayBorder:nth-child(21)'):
nineteen = r.text
ninteen_list.append(nineteen)
print()
print('Scraping Done')
data = {
"'מס": one_list,
"פע'": sec_list,
"שם": third_list,
"ישוב": fourth_list,
"שכונה": fifth_list,
"כתובת": sixth_list,
"חדרים": sev_list,
"סוג": eight_list,
"שטח": nine_list,
"קומה": ten_list,
"מחיר": elev_list,
"טלפון": twe_list,
"תאריך עדכון מערכת": thir_list,
"שעת עדכון מערכת": fourteen_list,
"ת. עדכון": fif_list,
"מדיה": six_list,
"מעלית": seven_list,
"הערות": eighteen_list,
"תאריך רישום במערכת": ninteen_list
}
print()
Output_File = input('Please Enter the Name of File(Output): > ')
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import os
import torch.nn as nn
import torch
import warnings
import argparse
from Logger import *
import pickle
from Dataset import *
warnings.filterwarnings("ignore")
from Functions import *
from Network import *
import pandas as pd
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=str, default='0', help='which gpu to use')
parser.add_argument('--path', type=str, default='../', help='path of csv file with DNA sequences and labels')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=24, help='size of each batch during training')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay used in the optimizer')
parser.add_argument('--ntoken', type=int, default=4, help='number of tokens to represent DNA nucleotides (should always be 4)')
parser.add_argument('--nclass', type=int, default=919, help='number of classes from the linear decoder')
parser.add_argument('--ninp', type=int, default=512, help='ninp for transformer encoder')
parser.add_argument('--nhead', type=int, default=8, help='nhead for transformer encoder')
parser.add_argument('--nhid', type=int, default=2048, help='nhid for transformer encoder')
parser.add_argument('--nlayers', type=int, default=6, help='nlayers for transformer encoder')
parser.add_argument('--save_freq', type=int, default=1, help='saving checkpoints per save_freq epochs')
parser.add_argument('--dropout', type=float, default=.1, help='transformer dropout')
parser.add_argument('--warmup_steps', type=int, default=3200, help='training schedule warmup steps')
parser.add_argument('--lr_scale', type=float, default=1, help='learning rate scale')
parser.add_argument('--nmute', type=int, default=18, help='number of mutations during training')
parser.add_argument('--kmers', type=int, nargs='+', default=[7], help='k-mers to be aggregated')
#parser.add_argument('--kmer_aggregation', type=bool, default=True, help='k-mers to be aggregated')
parser.add_argument('--kmer_aggregation', dest='kmer_aggregation', action='store_true')
parser.add_argument('--no_kmer_aggregation', dest='kmer_aggregation', action='store_false')
parser.set_defaults(kmer_aggregation=True)
parser.add_argument('--nfolds', type=int, default=5, help='number of cross validation folds')
parser.add_argument('--fold', type=int, default=0, help='which fold to train')
parser.add_argument('--val_freq', type=int, default=1, help='run validation every val_freq epochs')
parser.add_argument('--num_workers', type=int, default=1, help='num_workers')
opts = parser.parse_args()
return opts
#def train_fold():
args=get_args()
# DECLARE HOW MANY GPUS YOU WISH TO USE.
# KAGGLE ONLY HAS 1, BUT OFFLINE, YOU CAN USE MORE
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_id #0,1,2,3 for four gpu
if torch.cuda.device_count() > 1:
DATAPARALLEL=True
else:
DATAPARALLEL=False
# print(torch.cuda.device_count())
#
# exit()
# VERSION FOR SAVING MODEL WEIGHTS
VER=26
# IF VARIABLE IS NONE, THEN NOTEBOOK COMPUTES TOKENS
# OTHERWISE NOTEBOOK LOADS TOKENS FROM PATH
LOAD_TOKENS_FROM = '../../input/py-bigbird-v26'
# IF VARIABLE IS NONE, THEN NOTEBOOK TRAINS A NEW MODEL
# OTHERWISE IT LOADS YOUR PREVIOUSLY TRAINED MODEL
#LOAD_MODEL_FROM = '../input/whitespace'
LOAD_MODEL_FROM = None
# IF FOLLOWING IS NONE, THEN NOTEBOOK
# USES INTERNET AND DOWNLOADS HUGGINGFACE
# CONFIG, TOKENIZER, AND MODEL
DOWNLOADED_MODEL_PATH = "../../input/deberta-xlarge"
if DOWNLOADED_MODEL_PATH is None:
DOWNLOADED_MODEL_PATH = 'model'
MODEL_NAME = 'microsoft/deberta-xlarge'
#MODEL_NAME = "google/bigbird-roberta-large"
from torch import cuda
config = {'model_name': MODEL_NAME,
'max_length': 1536,
'train_batch_size':2,
'valid_batch_size':1,
'epochs':7,
'learning_rates': [2.5e-5, 2.5e-5, 2.5e-5, 2.5e-6, 2.5e-6, 2.5e-6, 2.5e-7],
'max_grad_norm':1,
'device': 'cuda' if cuda.is_available() else 'cpu'}
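# One learning rate per epoch (the list length matches 'epochs'); these are scaled by the --lr_scale argument below.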
config['learning_rates']=[lr*args.lr_scale for lr in config['learning_rates']]
print('learning_rates:')
print(config['learning_rates'])
#lr_scale
# THIS WILL COMPUTE VAL SCORE DURING COMMIT BUT NOT DURING SUBMIT
COMPUTE_VAL_SCORE = True
if len( os.listdir('../../input/test') )>5:
COMPUTE_VAL_SCORE = False
from transformers import *
if DOWNLOADED_MODEL_PATH == 'model':
os.system('mkdir model')
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, add_prefix_space=True)
tokenizer.save_pretrained('model')
config_model = AutoConfig.from_pretrained(MODEL_NAME)
config_model.num_labels = 15
config_model.save_pretrained('model')
backbone = AutoModelForTokenClassification.from_pretrained(MODEL_NAME,
config=config_model)
backbone.save_pretrained('model')
#load data and libraries
import numpy as np, os
from scipy import stats
import pandas as pd, gc
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForTokenClassification
from torch.utils.data import Dataset, DataLoader
import torch
from sklearn.metrics import accuracy_score
train_df = pd.read_csv('../../input/train.csv')
print( train_df.shape )
train_df.head()
# https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
test_names, test_texts = [], []
for f in list(os.listdir('../../input/test')):
test_names.append(f.replace('.txt', ''))
test_texts.append(open('../../input/test/' + f, 'r').read())
test_texts = pd.DataFrame({'id': test_names, 'text': test_texts})
print(test_texts.head())
# https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
test_names, train_texts = [], []
for f in tqdm(list(os.listdir('../../input/train'))):
test_names.append(f.replace('.txt', ''))
train_texts.append(open('../../input/train/' + f, 'r').read())
train_text_df = pd.DataFrame({'id': test_names, 'text': train_texts})
print(train_text_df.head())
#convert train to text labels
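# Each training document is converted to one BIO-style tag per whitespace token: 'B-<discourse_type>' for the first token of an annotated span, 'I-<discourse_type>' for the remaining tokens, and 'O' for tokens outside any span.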
if not LOAD_TOKENS_FROM:
all_entities = []
for ii,i in tqdm(enumerate(train_text_df.iterrows())):
if ii%100==0: print(ii,', ',end='')
total = i[1]['text'].split().__len__()
entities = ["O"]*total
for j in train_df[train_df['id'] == i[1]['id']].iterrows():
discourse = j[1]['discourse_type']
list_ix = [int(x) for x in j[1]['predictionstring'].split(' ')]
entities[list_ix[0]] = f"B-{discourse}"
for k in list_ix[1:]: entities[k] = f"I-{discourse}"
all_entities.append(entities)
train_text_df['entities'] = all_entities
train_text_df.to_csv('train_NER.csv',index=False)
else:
from ast import literal_eval
train_text_df =
|
pd.read_csv(f'{LOAD_TOKENS_FROM}/train_NER.csv')
|
pandas.read_csv
|
import pandas as pd
import re
def chain(tracks_csv="../data/raw/tracks.csv", features_csv="../data/raw/features.csv"):
"""
Method chain function to load and process tracks and features data for analysis 1 (Will)
"""
tracks =
|
pd.read_csv(tracks_csv, low_memory=False)
|
pandas.read_csv
|
#! /usr/bin/env python
import pandas as pd
import matplotlib.pyplot as plt
import glob, os
# The only thing that needs to be changed when running this script is the variable 'path_in'
# The script will go to that directory, read in all text files with the .txt extension
# It will then parse the files, and write out 3 csv files
# These files will then be read in by the script, and it will plot the graphs
path_in = 'C:/Users/isle132/Desktop/For Python/140 nm Silica Data/173 deg/Cell Center'
os.chdir(path_in) # go to the path specified
# This will loop over all files with the .txt extension in the directory specified
# It will take the prefix for the file, and make a .Diameter.csv, a .correlation.csv and a .residual.csv
# These are extracted from the data in the .txt file
for file in glob.glob('*.txt'):
exp_data_prefix = os.path.splitext(file)[0]
exp_data_file = file
file_in_name_path = path_in+'/'+exp_data_file
file_diameter_out_name_path = path_in+'/'+exp_data_prefix+'.Diameter.csv'
file_correlation_out_name_path = path_in+'/'+exp_data_prefix+'.correlation.csv'
file_residual_out_name_path = path_in+'/'+exp_data_prefix+'.residual.csv'
with open(file_in_name_path, 'r') as f_in, open(file_diameter_out_name_path, 'w') as f_dia_out, \
open(file_residual_out_name_path, 'w') as f_res_out, open(file_correlation_out_name_path, 'w') as f_cor_out:
copy_dia = False
copy_res = False
copy_cor = False
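# Simple state machine: a matching section-header line switches on copying into the corresponding output file; a blank line switches all copying off.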
for line in f_in:
if line.strip() == 'Diameter (nm),Frequency (%),Undersize (%)':
copy_dia = True
elif line.strip() == '':
copy_dia = False
copy_res = False
copy_cor = False
elif line.strip() == 'Delay Time (µs),Residual':
copy_res = True
elif line.strip() == 'Delay Time (µs),Correlation,Fitting Function':
copy_cor = True
if copy_dia:
f_dia_out.write(line)
elif copy_res:
f_res_out.write(line)
elif copy_cor:
f_cor_out.write(line)
# Import the data from the .Diameter.csv file
# Plot the diameter vs frequency
dia_files = glob.glob('*.Diameter.csv')
dia_dfs = [pd.read_csv(dia_fp).assign(Data=os.path.splitext(os.path.splitext(dia_fp)[0])[0]) for dia_fp in dia_files]
dia_df = pd.concat(dia_dfs, ignore_index=True)
dia_groups = dia_df.groupby('Data')
fig1, ax1 = plt.subplots()
ax1.margins(0.05)
ax1.set_xscale('log')
ax1.set_xlim(dia_df['Diameter (nm)'].min(), dia_df['Diameter (nm)'].max())
for name, group in dia_groups:
ax1.plot(group['Diameter (nm)'], group['Frequency (%)'], label=name)
ax1.legend(loc='upper right')
# Import the data from the .residual.csv file
# Plot the Delay time vs Residual
res_files = glob.glob('*.residual.csv')
res_dfs = [pd.read_csv(res_fp, encoding="ISO-8859-1").assign(Data=os.path.splitext(os.path.splitext(res_fp)[0])[0])
for res_fp in res_files]
res_df =
|
pd.concat(res_dfs, ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
        # Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
        with tm.assertRaisesRegexp(ValueError,
                                   r"shape of value must be \(3, 2\), "
                                   r"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
        assertRaisesRegexp(ValueError,
                           r"Shape of passed values is \(3, 4, 5\), "
                           r"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
        assertRaisesRegexp(ValueError,
                           r"Shape of passed values is \(3, 4, 5\), "
                           r"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
        assertRaisesRegexp(ValueError,
                           r"Shape of passed values is \(3, 4, 5\), "
                           r"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
        # raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
        # negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
        # this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
        # test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
self.assert_panel_equal(expected, result)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
|
assert_panel_equal(result, exp)
|
pandas.util.testing.assert_panel_equal
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import pickle
import re
import sys
import copy
import json
import yaml
import redis
import bisect
import shutil
import difflib
import hashlib
import warnings
import datetime
import requests
import tempfile
import importlib
import contextlib
import collections
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Dict, Union, Tuple, Any, Text, Optional, Callable
from types import ModuleType
from urllib.parse import urlparse
from ..config import C
from ..log import get_module_logger, set_log_with_config
log = get_module_logger("utils")
#################### Server ####################
def get_redis_connection():
"""get redis connection instance."""
return redis.StrictRedis(host=C.redis_host, port=C.redis_port, db=C.redis_task_db)
#################### Data ####################
def read_bin(file_path: Union[str, Path], start_index, end_index):
    file_path = Path(file_path).expanduser().resolve()
with file_path.open("rb") as f:
# read start_index
ref_start_index = int(np.frombuffer(f.read(4), dtype="<f")[0])
si = max(ref_start_index, start_index)
if si > end_index:
return pd.Series(dtype=np.float32)
# calculate offset
f.seek(4 * (si - ref_start_index) + 4)
# read nbytes
count = end_index - si + 1
data = np.frombuffer(f.read(4 * count), dtype="<f")
series = pd.Series(data, index=pd.RangeIndex(si, si + len(data)))
return series
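# Illustrative usage of read_bin; the path below is a hypothetical qlib feature file.
# The assumed layout (matching the reads above) is a 4-byte little-endian float holding
# the reference start index, followed by consecutive little-endian float32 values:
#   s = read_bin("~/.qlib/qlib_data/cn_data/features/sh600000/close.day.bin", 100, 110)
#   # -> pd.Series of float32 values indexed by pd.RangeIndex(100, 111)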
def np_ffill(arr: np.ndarray):
"""
forward fill a 1D numpy array
Parameters
----------
    arr : np.ndarray
Input numpy 1D array
"""
mask = np.isnan(arr.astype(float)) # np.isnan only works on np.float
# get fill index
idx = np.where(~mask, np.arange(mask.shape[0]), 0)
np.maximum.accumulate(idx, out=idx)
return arr[idx]
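# Illustrative example of np_ffill; values are small enough to verify by hand:
#   np_ffill(np.array([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan]))
#   # -> array([nan, 1., 1., 1., 4., 4.])
#   # a leading NaN stays NaN because there is no earlier value to carry forward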
#################### Search ####################
def lower_bound(data, val, level=0):
"""multi fields list lower bound.
for single field list use `bisect.bisect_left` instead
"""
left = 0
right = len(data)
while left < right:
mid = (left + right) // 2
if val <= data[mid][level]:
right = mid
else:
left = mid + 1
return left
def upper_bound(data, val, level=0):
"""multi fields list upper bound.
for single field list use `bisect.bisect_right` instead
"""
left = 0
right = len(data)
while left < right:
mid = (left + right) // 2
if val >= data[mid][level]:
left = mid + 1
else:
right = mid
return left
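# Illustrative example of lower_bound/upper_bound on a list sorted by its first field:
#   data = [(1, "a"), (2, "b"), (2, "c"), (3, "d")]
#   lower_bound(data, 2)  # -> 1, first index whose level-0 value is >= 2
#   upper_bound(data, 2)  # -> 3, one past the last index whose level-0 value is <= 2
#   # so data[1:3] is the half-open slice of all records with level-0 value == 2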
#################### HTTP ####################
def requests_with_retry(url, retry=5, **kwargs):
while retry > 0:
retry -= 1
try:
res = requests.get(url, timeout=1, **kwargs)
assert res.status_code in {200, 206}
return res
except AssertionError:
continue
except Exception as e:
log.warning("exception encountered {}".format(e))
continue
raise Exception("ERROR: requests failed!")
#################### Parse ####################
def parse_config(config):
    # Check whether parsing is needed; all objects except str are returned as-is
if not isinstance(config, str):
return config
    # Check whether config is a path to an existing file
if os.path.exists(config):
with open(config, "r") as f:
return yaml.safe_load(f)
# Check whether the str can be parsed
try:
return yaml.safe_load(config)
except BaseException:
raise ValueError("cannot parse config!")
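# Illustrative behaviour of parse_config (the file name is only an example path):
#   parse_config({"market": "csi300"})         # non-str input is returned unchanged
#   parse_config("market: csi300\nlimit: 5")   # -> {'market': 'csi300', 'limit': 5}
#   parse_config("workflow_config.yaml")       # -> parsed file contents, if the path exists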
#################### Other ####################
def drop_nan_by_y_index(x, y, weight=None):
# x, y, weight: DataFrame
    # Build a mask of rows in y that contain no NaN in any column.
mask = ~y.isna().any(axis=1)
# Get related rows from x, y, weight.
x = x[mask]
y = y[mask]
if weight is not None:
weight = weight[mask]
return x, y, weight
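# Illustrative example of drop_nan_by_y_index with hypothetical frames:
#   x = pd.DataFrame({"feat": [0.1, 0.2, 0.3]})
#   y = pd.DataFrame({"label": [1.0, np.nan, 0.0]})
#   x2, y2, _ = drop_nan_by_y_index(x, y)
#   # row 1 is dropped from both because y has a NaN there; x2.index equals y2.index == [0, 2]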
def hash_args(*args):
    # json.dumps with sort_keys=True keeps dict keys sorted, so equal arguments always hash identically.
string = json.dumps(args, sort_keys=True, default=str) # frozenset
return hashlib.md5(string.encode()).hexdigest()
def parse_field(field):
# Following patterns will be matched:
# - $close -> Feature("close")
# - $close5 -> Feature("close5")
# - $open+$close -> Feature("open")+Feature("close")
if not isinstance(field, str):
field = str(field)
return re.sub(r"\$(\w+)", r'Feature("\1")', re.sub(r"(\w+\s*)\(", r"Operators.\1(", field))
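# Illustrative rewrites performed by parse_field (Ref is used here only as an example
# operator name resolved through the Operators namespace):
#   parse_field("$open+$close")    # -> 'Feature("open")+Feature("close")'
#   parse_field("Ref($close, 5)")  # -> 'Operators.Ref(Feature("close"), 5)'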
def get_module_by_module_path(module_path: Union[str, ModuleType]):
    """Load a python module from a module object, a dotted module path, or a .py file path.
    :param module_path: module object, dotted module path, or path to a .py file
    :return: the loaded module
    """
if isinstance(module_path, ModuleType):
module = module_path
else:
if module_path.endswith(".py"):
module_name = re.sub("^[^a-zA-Z_]+", "", re.sub("[^0-9a-zA-Z_]", "", module_path[:-3].replace("/", "_")))
module_spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(module_spec)
sys.modules[module_name] = module
module_spec.loader.exec_module(module)
else:
module = importlib.import_module(module_path)
return module
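# Illustrative usage of get_module_by_module_path; the .py path is hypothetical:
#   get_module_by_module_path("qlib.data")             # import by dotted module path
#   get_module_by_module_path("/path/to/strategy.py")  # load the module from a .py file
#   get_module_by_module_path(np)                      # a module object is returned as-is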
def get_callable_kwargs(config: Union[dict, str], default_module: Union[str, ModuleType] = None) -> (type, dict):
"""
extract class/func and kwargs from config info
Parameters
----------
config : [dict, str]
        the same format as the `config` accepted by init_instance_by_config;
        please refer to the doc of init_instance_by_config for details
default_module : Python module or str
It should be a python module to load the class type
This function will load class from the config['module_path'] first.
If config['module_path'] doesn't exist, it will load the class from default_module.
Returns
-------
(type, dict):
the class/func object and its arguments.
"""
if isinstance(config, dict):
if isinstance(config["class"], str):
module = get_module_by_module_path(config.get("module_path", default_module))
# getattr raises AttributeError if the class/func is not found in the module
_callable = getattr(module, config["class" if "class" in config else "func"])
else:
_callable = config["class"] # the class type itself is passed in
kwargs = config.get("kwargs", {})
elif isinstance(config, str):
# a.b.c.ClassName
*m_path, cls = config.split(".")
m_path = ".".join(m_path)
module = get_module_by_module_path(default_module if m_path == "" else m_path)
_callable = getattr(module, cls)
kwargs = {}
else:
raise NotImplementedError(f"This type of input is not supported")
return _callable, kwargs
get_cls_kwargs = get_callable_kwargs # NOTE: this is for compatibility for the previous version
def init_instance_by_config(
config: Union[str, dict, object],
default_module=None,
accept_types: Union[type, Tuple[type]] = (),
try_kwargs: Dict = {},
**kwargs,
) -> Any:
"""
get initialized instance with config
Parameters
----------
config : Union[str, dict, object]
dict example.
case 1)
{
'class': 'ClassName',
'kwargs': dict, # It is optional. {} will be used if not given
'module_path': path, # It is optional if module is given
}
case 2)
{
'class': <The class it self>,
'kwargs': dict, # It is optional. {} will be used if not given
}
str example.
1) specify a pickle object
- path like 'file:///<path to pickle file>/obj.pkl'
2) specify a class name
- "ClassName": getattr(module, "ClassName")() will be used.
3) specify module path with class name
- "a.b.c.ClassName" getattr(<a.b.c.module>, "ClassName")() will be used.
object example:
instance of accept_types
default_module : Python module
Optional. It should be a python module.
NOTE: the "module_path" will be override by `module` arguments
This function will load class from the config['module_path'] first.
If config['module_path'] doesn't exist, it will load the class from default_module.
accept_types: Union[type, Tuple[type]]
Optional. If the config is an instance of a specific type, return the config directly.
This will be passed into the second parameter of isinstance.
try_kwargs: Dict
Try to pass in kwargs in `try_kwargs` when initialized the instance
If an error occurs, it will fall back to initialization without try_kwargs.
Returns
-------
object:
An initialized object based on the config info
"""
if isinstance(config, accept_types):
return config
if isinstance(config, str):
# path like 'file:///<path to pickle file>/obj.pkl'
pr = urlparse(config)
if pr.scheme == "file":
with open(os.path.join(pr.netloc, pr.path), "rb") as f:
return pickle.load(f)
klass, cls_kwargs = get_callable_kwargs(config, default_module=default_module)
try:
return klass(**cls_kwargs, **try_kwargs, **kwargs)
except (TypeError,):
# TypeError for handling errors like
# 1: `XXX() got multiple values for keyword argument 'YYY'`
# 2: `XXX() got an unexpected keyword argument 'YYY'`
return klass(**cls_kwargs, **kwargs)
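# Illustrative usage of init_instance_by_config (a sketch; the class name, module path and
# kwargs below are assumptions, not a prescribed configuration):
#   >>> model = init_instance_by_config({
#   ...     "class": "SomeModel",
#   ...     "module_path": "my_project.models",
#   ...     "kwargs": {"learning_rate": 0.1},
#   ... })
# Equivalently, a dotted string such as "my_project.models.SomeModel" loads the same class
# and initializes it with no kwargs.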
@contextlib.contextmanager
def class_casting(obj: object, cls: type):
"""
Python doesn't provide the downcasting mechanism.
We use the trick here to downcast the class
Parameters
----------
obj : object
the object to be cast
cls : type
the target class type
"""
orig_cls = obj.__class__
obj.__class__ = cls
yield
obj.__class__ = orig_cls
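# Illustrative usage of class_casting (a sketch with hypothetical classes):
#   >>> class Base: pass
#   >>> class Derived(Base): pass
#   >>> d = Derived()
#   >>> with class_casting(d, Base):
#   ...     type(d) is Base       # treated as Base only inside the block
#   True
#   >>> type(d) is Derived        # original class is restored afterwards
#   True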
def compare_dict_value(src_data: dict, dst_data: dict):
"""Compare dict value
:param src_data:
:param dst_data:
:return:
"""
class DateEncoder(json.JSONEncoder):
# FIXME: This class can only be accurate to the day. If it is a minute,
# there may be a bug
def default(self, o):
if isinstance(o, (datetime.datetime, datetime.date)):
return o.strftime("%Y-%m-%d %H:%M:%S")
return json.JSONEncoder.default(self, o)
src_data = json.dumps(src_data, indent=4, sort_keys=True, cls=DateEncoder)
dst_data = json.dumps(dst_data, indent=4, sort_keys=True, cls=DateEncoder)
diff = difflib.ndiff(src_data, dst_data)
changes = [line for line in diff if line.startswith("+ ") or line.startswith("- ")]
return changes
def get_or_create_path(path: Optional[Text] = None, return_dir: bool = False):
"""Create or get a file or directory given the path and return_dir.
Parameters
----------
path: a string indicating the path, or None to create a temporary path.
return_dir: if True, create and return a directory; otherwise create and return a file path.
"""
if path:
if return_dir and not os.path.exists(path):
os.makedirs(path)
elif not return_dir: # return a file, thus we need to create its parent directory
xpath = os.path.abspath(os.path.join(path, ".."))
if not os.path.exists(xpath):
os.makedirs(xpath)
else:
temp_dir = os.path.expanduser("~/tmp")
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
if return_dir:
path = tempfile.mkdtemp(dir=temp_dir)  # mkdtemp returns the directory path directly (no fd)
else:
_, path = tempfile.mkstemp(dir=temp_dir)
return path
@contextlib.contextmanager
def save_multiple_parts_file(filename, format="gztar"):
"""Save multiple parts file
Implementation process:
1. get the absolute path to 'filename'
2. create a 'filename' directory
3. user does something with file_path('filename/')
4. remove 'filename' directory
5. make_archive 'filename' directory, and rename 'archive file' to filename
:param filename: result model path
:param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar"
:return: real model path
Usage::
>>> # The following code will create an archive file('~/tmp/test_file') containing 'test_doc_i'(i is 0-10) files.
>>> with save_multiple_parts_file('~/tmp/test_file') as filename_dir:
... for i in range(10):
... temp_path = os.path.join(filename_dir, 'test_doc_{}'.format(str(i)))
... with open(temp_path, 'w') as fp:
... fp.write(str(i))
...
"""
if filename.startswith("~"):
filename = os.path.expanduser(filename)
file_path = os.path.abspath(filename)
# Create model dir
if os.path.exists(file_path):
raise FileExistsError("ERROR: file exists: {}, cannot be create the directory.".format(file_path))
os.makedirs(file_path)
# return model dir
yield file_path
# filename dir to filename.tar.gz file
tar_file = shutil.make_archive(file_path, format=format, root_dir=file_path)
# Remove filename dir
if os.path.exists(file_path):
shutil.rmtree(file_path)
# filename.tar.gz rename to filename
os.rename(tar_file, file_path)
@contextlib.contextmanager
def unpack_archive_with_buffer(buffer, format="gztar"):
"""Unpack archive with archive buffer
After the call is finished, the archive file and directory will be deleted.
Implementation process:
1. create 'tempfile' in '~/tmp/' and directory
2. 'buffer' write to 'tempfile'
3. unpack archive file('tempfile')
4. user does something with file_path('tempfile/')
5. remove 'tempfile' and 'tempfile directory'
:param buffer: bytes
:param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar"
:return: unpack archive directory path
Usage::
>>> # The following code is to print all the file names in 'test_unpack.tar.gz'
>>> with open('test_unpack.tar.gz', 'rb') as fp:
... buffer = fp.read()
...
>>> with unpack_archive_with_buffer(buffer) as temp_dir:
... for f_n in os.listdir(temp_dir):
... print(f_n)
...
"""
temp_dir = os.path.expanduser("~/tmp")
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
with tempfile.NamedTemporaryFile("wb", delete=False, dir=temp_dir) as fp:
fp.write(buffer)
file_path = fp.name
try:
tar_file = file_path + ".tar.gz"
os.rename(file_path, tar_file)
# Create dir
os.makedirs(file_path)
shutil.unpack_archive(tar_file, format=format, extract_dir=file_path)
# Return temp dir
yield file_path
except Exception as e:
log.error(str(e))
finally:
# Remove temp tar file
if os.path.exists(tar_file):
os.unlink(tar_file)
# Remove temp model dir
if os.path.exists(file_path):
shutil.rmtree(file_path)
@contextlib.contextmanager
def get_tmp_file_with_buffer(buffer):
temp_dir = os.path.expanduser("~/tmp")
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
with tempfile.NamedTemporaryFile("wb", delete=True, dir=temp_dir) as fp:
fp.write(buffer)
file_path = fp.name
yield file_path
def remove_repeat_field(fields):
"""remove repeat field
:param fields: list; features fields
:return: list
"""
fields = copy.deepcopy(fields)
_fields = set(fields)
return sorted(_fields, key=fields.index)
def remove_fields_space(fields: [list, str, tuple]):
"""remove fields space
:param fields: features fields
:return: list or str
"""
if isinstance(fields, str):
return fields.replace(" ", "")
return [i.replace(" ", "") for i in fields if isinstance(i, str)]
def normalize_cache_fields(fields: [list, tuple]):
"""normalize cache fields
:param fields: features fields
:return: list
"""
return sorted(remove_repeat_field(remove_fields_space(fields)))
def normalize_cache_instruments(instruments):
"""normalize cache instruments
:return: list or dict
"""
if isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
instruments = sorted(list(instruments))
else:
# dict type stockpool
if "market" in instruments:
pass
else:
instruments = {k: sorted(v) for k, v in instruments.items()}
return instruments
def is_tradable_date(cur_date):
"""judgy whether date is a tradable date
----------
date : pandas.Timestamp
current date
"""
from ..data import D
return str(cur_date.date()) == str(D.calendar(start_time=cur_date, future=True)[0].date())
def get_date_range(trading_date, left_shift=0, right_shift=0, future=False):
"""get trading date range by shift
Parameters
----------
trading_date: pd.Timestamp
left_shift: int
right_shift: int
future: bool
"""
from ..data import D
start = get_date_by_shift(trading_date, left_shift, future=future)
end = get_date_by_shift(trading_date, right_shift, future=future)
calendar = D.calendar(start, end, future=future)
return calendar
def get_date_by_shift(trading_date, shift, future=False, clip_shift=True, freq="day", align: Optional[str] = None):
"""get trading date with shift bias will cur_date
e.g. : shift == 1, return next trading date
shift == -1, return previous trading date
----------
trading_date : pandas.Timestamp
current date
shift : int
clip_shift: bool
align : Optional[str]
When align is None, this function will raise ValueError if `trading_date` is not a trading date
when align is "left"/"right", it will try to align to left/right nearest trading date before shifting when `trading_date` is not a trading date
"""
from qlib.data import D
cal = D.calendar(future=future, freq=freq)
trading_date = pd.to_datetime(trading_date)
if align is None:
if trading_date not in list(cal):
raise ValueError("{} is not trading day!".format(str(trading_date)))
_index = bisect.bisect_left(cal, trading_date)
elif align == "left":
_index = bisect.bisect_right(cal, trading_date) - 1
elif align == "right":
_index = bisect.bisect_left(cal, trading_date)
else:
raise ValueError(f"align with value `{align}` is not supported")
shift_index = _index + shift
if shift_index < 0 or shift_index >= len(cal):
if clip_shift:
shift_index = np.clip(shift_index, 0, len(cal) - 1)
else:
raise IndexError(f"The shift_index({shift_index}) of the trading day ({trading_date}) is out of range")
return cal[shift_index]
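# Illustrative usage of get_date_by_shift (a sketch; requires an initialized data provider,
# and the example dates are assumptions):
#   >>> get_date_by_shift(pd.Timestamp("2020-01-02"), shift=1)    # next trading day
#   >>> get_date_by_shift(pd.Timestamp("2020-01-04"), shift=0, align="left")
#   # a non-trading day is first snapped to the previous trading day, then shifted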
def get_next_trading_date(trading_date, future=False):
"""get next trading date
----------
trading_date : pandas.Timestamp
current date
"""
return get_date_by_shift(trading_date, 1, future=future)
def get_pre_trading_date(trading_date, future=False):
"""get previous trading date
----------
trading_date : pandas.Timestamp
current date
"""
return get_date_by_shift(trading_date, -1, future=future)
def transform_end_date(end_date=None, freq="day"):
"""handle the end date with various format
If end_date is -1, None, or end_date is greater than the maximum trading day, the last trading date is returned.
Otherwise, returns the end_date
----------
end_date: str
end trading date
freq : str
calendar frequency, default "day"
"""
from ..data import D
last_date = D.calendar(freq=freq)[-1]
if end_date is None or (str(end_date) == "-1") or (pd.Timestamp(last_date) <
|
pd.Timestamp(end_date)
|
pandas.Timestamp
|
import pandas as pd
import numpy as np
def convert_minutes_to_seconds(time_minutes):
"""
Convert time expressed in (float) minutes into (float) seconds.
:param float time_minutes: Time expressed in minutes.
:return: Time expressed in seconds.
:rtype: float
"""
time_seconds = time_minutes * 60
return time_seconds
def transform_llimllib_boston_data(df, year):
"""
Transform 2013-2014 Boston Marathon data from llimllib's Github repo into a standard form for downstream processing.
Namely, split times are converted to integer seconds.
:param pandas.DataFrame df: DataFrame representing 2013-2014 Boston Marathon data from llimllib's Github repo.
:param int year: Year of Boston Marathon
:return: DataFrame of transformed marathon data
:rtype: pandas.DataFrame
"""
# Header names for split time field in llimllib marathon data
headers_split = ['5k', '10k', '20k', 'half', '25k', '30k', '35k', '40k', 'official']
# Replace nan placeholders with actual nan values
for header in headers_split:
df[header].replace('-', np.nan, inplace=True)
# Cast split times to float
dtypes_new = dict(zip(headers_split, [float] * len(headers_split)))
df = df.astype(dtypes_new)
# Convert split time from decimal minutes to seconds
for header in headers_split + ['pace']:
df[header] = df[header].apply(convert_minutes_to_seconds)
# Add year field
df['year'] = year
# Add empty columns for 15k split time and gender_place rank
df['15k'] = np.nan
df['gender_place'] = np.nan
df = df.rename(columns={'official': 'official_time', 'ctz': 'citizen'})
return df
# at least one row in 2015 had an incomprehensible official finish time: 0.124548611111111
# I believe there was only one, but the try/catch below should handle
# missing values, placeholder '-', and bad values
def convert_string_to_seconds(time_str):
"""
Convert time in a string format 'HH:MM:SS' into (int) seconds.
:param str time_str: Time in a string format 'HH:MM:SS'
:return: Time expressed in seconds
:rtype: int
"""
try:
hours_str, minutes_str, seconds_str = time_str.split(':')
return int(hours_str) * 3600 + int(minutes_str) * 60 + int(seconds_str)
except:
return np.nan
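# Illustrative usage of convert_string_to_seconds (a sketch):
#   >>> convert_string_to_seconds('2:03:45')
#   7425
#   >>> convert_string_to_seconds('-')       # placeholder / unparseable value
#   nan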
def transform_rojour_boston_data(df, year):
"""
Transform 2015-2017 Boston Marathon data from rojour's Github repo into a standard form for downstream processing.
Namely, split times are converted to integer seconds.
:param pandas.DataFrame df: DataFrame representing 2015-2017 Boston Marathon data from rojour's Github repo.
:param int year: Year of Boston Marathon
:return: DataFrame of transformed marathon data
:rtype: pandas.DataFrame
"""
# Drop unnecessary columns
# NOTE: the drop() results below are not assigned back, so 'Proj Time' and the unnamed index
# column actually survive until combine_boston_data() removes them; assigning here would make
# that later drop raise a KeyError.
if year == 2016:
df.drop('Proj Time', axis=1)
else:
df.drop([df.columns[0], 'Proj Time'], axis=1)
# The split times in the Kaggle data are formatted as strings hh:mm:ss. We want these times in total seconds.
headers_split = ['5K', '10K', '15K', '20K', 'Half', '25K', '30K', '35K', '40K', 'Official Time']
for header in headers_split:
df[header] = df[header].apply(convert_string_to_seconds)
if year == 2015:
df = df.dropna(subset=['Official Time'])
# Create a year field and an empty field for 'genderdiv'
df['year'] = year
df['genderdiv'] = np.nan
# Map of field names to rename headers of df to ensure consistency with transform_llimllib_boston_data
headers_map = {'Age': 'age', 'Official Time': 'official_time', 'Bib': 'bib', 'Citizen': 'citizen',
'Overall': 'overall', 'Pace': 'pace', 'State': 'state', 'Country': 'country', 'City': 'city',
'Name': 'name', 'Division': 'division', 'M/F': 'gender', '5K': '5k', '10K': '10k', '15K': '15k',
'20K': '20k', 'Half': 'half', '25K': '25k', '30K': '30k', '35K': '35k', '40K': '40k',
'Gender': 'gender_place'}
# The rojour data has an unnamed field that varies depending on the year.
# We can't drop this field since it's used later to remove certain records.
if year == 2016:
headers_map.update({'Unnamed: 8': 'para_status'})
else:
headers_map.update({'Unnamed: 9': 'para_status'})
df = df.rename(columns=headers_map)
# Drop all runners with a 'para' status and then drop the para_status field
df = df[df.para_status != 'MI']
df = df[df.para_status != 'VI']
df = df.drop('para_status', axis=1)
return df
def band_age(age):
"""
Banding method that maps a Boston Marathon runner's (integer) age to a labeled age band and level.
**Note**: The age brackets on the BAA website are as follows:
* 14-19*, 20-24, 25-29, 30-34, 35-39, 40-44, 45-49, 50-54, 55-59, 60-64, 65-70, 70-74, 75-79, and 80
This places 70 into two brackets. We have assumed this is a typo and use the bands '65-69' and '70-74'.
We have also ignored the minimum age in case it has not been the same in every year
:param int age: Age of Boston Marathon runner
:return: (banded_level, age_banding) where: banded_level is banded level of age for Boston Marathon runner and
age_banding is banding of age for Boston Marathon runner in 5 year increments
:rtype: (int, str)
"""
if age <= 19:
bid = 1
bstr = '<= 19'
elif age <= 24:
bid = 2
bstr = '20-24'
elif age <= 29:
bid = 3
bstr = '25-29'
elif age <= 34:
bid = 4
bstr = '30-34'
elif age <= 39:
bid = 5
bstr = '35-39'
elif age <= 44:
bid = 6
bstr = '40-44'
elif age <= 49:
bid = 7
bstr = '45-49'
elif age <= 54:
bid = 8
bstr = '50-54'
elif age <= 59:
bid = 9
bstr = '55-59'
elif age <= 64:
bid = 10
bstr = '60-64'
elif age <= 69:
bid = 11
bstr = '65-69'
elif age <= 74:
bid = 12
bstr = '70-74'
elif age <= 79:
bid = 13
bstr = '75-79'
else:
bid = 14
bstr = '80+'
return bid, bstr
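# Illustrative usage of band_age (a sketch):
#   >>> band_age(42)
#   (6, '40-44')
#   >>> band_age(85)
#   (14, '80+')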
def append_age_banding(df):
"""
Method that appends a banding of the age field, which is consistent with the method `band_age`.
**Note**: This method assumes that the DataFrame `df` includes the (int) field named 'age', which is passed to `band_age`.
:param pandas.DataFrame df: DataFrame of transformed marathon data
:return: DataFrame of transformed marathon data that includes a banding of age consistent with method `band_age`
:rtype: pandas.DataFrame
"""
return pd.concat((
df,
df['age'].apply(lambda cell: pd.Series(band_age(cell), index=['age_bucket', 'age_range']))
), axis=1)
def combine_boston_data(list_dfs):
"""
Method that takes the union of a list of DataFrames each representing different years of Boston Marathon data. The
field named 'age' is also used to append a banding for runners' age.
:param list[pandas.DataFrame] list_dfs: List of DataFrames containing transformed marathon data
:return: DataFrame of transformed and unioned marathon data that includes a banding of age consistent with method
`band_age`
:rtype: pandas.DataFrame
"""
df_combine = pd.concat(list_dfs, sort=True)
df_combine = append_age_banding(df_combine)
df_combine.drop(['pace', 'Proj Time', 'Unnamed: 0'], axis=1, inplace=True)
return df_combine
def pipe_reader(input_file):
"""
Read a pipe-delimited dataset that contains commas inside columns, without using
pandas read_csv
:param str input_file: File path
:return: The pipe delimited file as a DataFrame
:rtype: pandas.DataFrame
"""
with open(input_file, 'r') as f:
temp_file = f.read()
temp_file = temp_file.split('\n')
lis = []
for row in temp_file:
row = row.split('|')
if len(row) == 20:
lis.append(row)
temp_df = pd.DataFrame(lis, columns=lis[0])
temp_df = temp_df.drop(0, axis=0)
return temp_df
def process_boston_data():
"""
Method to import, transform, and combine Boston Marathon data.
:return: DataFrame of transformed and combined Boston Marathon data.
:rtype: pandas.DataFrame
"""
# Read in data
llimllib_boston_results_2013 = pd.read_csv('dashathon/data/external_data/llimllib_boston_results_2013.csv',
delimiter=',')
llimllib_boston_results_2014 = pd.read_csv('dashathon/data/external_data/llimllib_boston_results_2014.csv',
delimiter=',')
rojour_boston_results_2015 = pd.read_csv('dashathon/data/external_data/rojour_boston_results_2015.csv',
delimiter=',')
rojour_boston_results_2016 = pd.read_csv('dashathon/data/external_data/rojour_boston_results_2016.csv',
delimiter=',')
rojour_boston_results_2017 = pd.read_csv('dashathon/data/external_data/rojour_boston_results_2017.csv',
delimiter=',')
# Transform data
boston_results_2013 = transform_llimllib_boston_data(df=llimllib_boston_results_2013, year=2013)
boston_results_2014 = transform_llimllib_boston_data(df=llimllib_boston_results_2014, year=2014)
boston_results_2015 = transform_rojour_boston_data(df=rojour_boston_results_2015, year=2015)
boston_results_2016 = transform_rojour_boston_data(df=rojour_boston_results_2016, year=2016)
boston_results_2017 = transform_rojour_boston_data(df=rojour_boston_results_2017, year=2017)
# Combine Boston data
boston_results = combine_boston_data(list_dfs=[boston_results_2013, boston_results_2014, boston_results_2015,
boston_results_2016, boston_results_2017])
# Append host city to distinguish among other marathon results
boston_results['host_city'] = 'Boston'
# Removing gender 'W' from bib in boston base
boston_results.bib = boston_results.bib.str.replace('W', '')
return boston_results
def process_nyc_data():
"""
Method to import, transform, and combine NYC Marathon data.
:return: DataFrame of transformed and combine NYC Marathon data.
:rtype: pandas.DataFrame
"""
andreanr_nyc_results_2015 = pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2015.csv')
andreanr_nyc_results_2016 = pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2016.csv')
andreanr_nyc_results_2017 = pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2017.csv')
andreanr_nyc_results_2018 = pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2018.csv')
# Merging all nyc datasets first
andreanr_nyc_results = andreanr_nyc_results_2015.append([andreanr_nyc_results_2016, andreanr_nyc_results_2017,
andreanr_nyc_results_2018], ignore_index=True)
# Removing records with missing split times
headers_nyc_splits = ['splint_10k', 'splint_15k', 'splint_20k', 'splint_25k', 'splint_30k', 'splint_35k',
'splint_40k', 'splint_5k', 'splint_half', 'official_time']
andreanr_nyc_results = andreanr_nyc_results.dropna(subset=headers_nyc_splits + ['age'])
# Consistent age
andreanr_nyc_results.age = andreanr_nyc_results.age.astype('int64')
# Converting HH:MM:SS to seconds
for header in headers_nyc_splits:
andreanr_nyc_results[header] = andreanr_nyc_results[header].apply(convert_string_to_seconds)
# Assuming na values for absent columns in nyc data
andreanr_nyc_results['citizen'] = None
andreanr_nyc_results['division'] = None
andreanr_nyc_results['genderdiv'] = None
andreanr_nyc_results['age_bucket'] = None
andreanr_nyc_results['age_range'] = None
# Extracting and renaming relevant columns
andreanr_nyc_results = andreanr_nyc_results[['splint_10k', 'splint_15k', 'splint_20k', 'splint_25k', 'splint_30k',
'splint_35k', 'splint_40k', 'splint_5k', 'age', 'bib', 'citizen',
'city', 'country', 'division', 'gender', 'place_gender', 'genderdiv',
'splint_half', 'name', 'official_time', 'place_overall',
'pace_per_mile', 'state', 'year', 'age_range', 'age_bucket']]
# Pace_per_mile of NYC data is assumed to be same as pace of Boston data
andreanr_nyc_results = andreanr_nyc_results.rename(columns={"splint_10k": "10k", "splint_15k": "15k",
"splint_20k": "20k", "splint_25k": "25k",
"splint_30k": "30k", "splint_35k": "35k",
"splint_40k": "40k", "splint_5k": "5k",
"place_gender": "gender_place", "splint_half": "half",
"place_overall": "overall", "pace_per_mile": "pace"})
# Adding host city
andreanr_nyc_results['host_city'] = 'NYC'
return andreanr_nyc_results
def process_chicago_data():
"""
Method to import, transform, and combine Chicago Marathon data.
:return: DataFrame of transformed and combined Chicago Marathon data.
:rtype: pandas.DataFrame
"""
chi14m = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2014_M.csv')
chi14w = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2014_W.csv')
chi15m = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2015_M.csv')
chi15w = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2015_W.csv')
chi16m = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2016_M.csv')
chi16w = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2016_W.csv')
chi17m = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2017_M.csv')
chi17w = pipe_reader('dashathon/data/scraped_data/chicago_marathon_2017_W.csv')
# Merging all chicago datasets first
chicago_results = chi14m.append([chi14w, chi15m, chi15w, chi16m, chi16w, chi17m, chi17w], ignore_index=True)
# Bringing around required datatypes
chicago_results[['year', 'bib', 'rank_gender', 'rank_age_group', 'overall']] = chicago_results[
['year', 'bib', 'rank_gender', 'rank_age_group', 'overall']].astype('int64')
chicago_results[['5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k', 'finish']] = chicago_results[
['5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k', 'finish']].astype('float64')
chicago_results['age'] = np.nan
chicago_results['age_bucket'] = None
chicago_results['citizen'] = None
chicago_results['division'] = None
chicago_results['genderdiv'] = None
chicago_results['name'] = None
chicago_results['official_time'] = None
chicago_results['pace'] = None
chicago_results['host_city'] = 'Chicago'
chicago_results = chicago_results.rename(columns={'age_group': 'age_range', 'rank_gender': 'gender_place'})
chicago_results = chicago_results.drop(['rank_age_group', 'finish'], axis=1)
return chicago_results
def process_london_data():
"""
Method to import, transform, and combine London Marathon data.
:return: DataFrame of transformed and combined London Marathon data.
:rtype: pandas.DataFrame
"""
# Reading in the datasets
lon14m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_M.csv',
sep='|', usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k',
'40k',
'finish'])
lon14me = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_M_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon14w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_W.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon14we = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_W_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon15m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_M.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon15me = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_M_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon15w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_W.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon15we = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_W_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon16m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2016_M.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon16me = pd.read_csv('dashathon/data/scraped_data/london_marathon_2016_M_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon16w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2016_W.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon16we = pd.read_csv('dashathon/data/scraped_data/london_marathon_2016_W_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon17m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2017_M.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon17me = pd.read_csv('dashathon/data/scraped_data/london_marathon_2017_M_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon17w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2017_W.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
lon17we = pd.read_csv('dashathon/data/scraped_data/london_marathon_2017_W_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
london_results = lon14m.append([lon14me, lon14w, lon14we, lon15m, lon15me, lon15w, lon15we, lon16m, lon16me, lon16w,
lon16we, lon17m, lon17me, lon17w, lon17we], ignore_index=True)
london_results['city'] = None
london_results['state'] = None
london_results['host_city'] = 'London'
return london_results
def process_berlin_data():
"""
Method to import, transform, and combine Berlin Marathon data.
:return: DataFrame of transformed and combined Berlin Marathon data.
:rtype: pandas.DataFrame
"""
# NOTE: the file paths below reference London result files; this looks like a copy-paste slip
# from process_london_data() and presumably should point at the corresponding Berlin files.
ber14m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_M.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
ber14w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_M_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
ber15m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_W.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
ber15w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2014_W_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
ber16m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_M.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
ber16w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_M_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
ber17m = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_W.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
ber17w = pd.read_csv('dashathon/data/scraped_data/london_marathon_2015_W_elite.csv', sep='|',
usecols=['year', 'bib', 'age_group', 'gender', 'country', 'overall', 'rank_gender',
'rank_age_group', '5k', '10k', '15k', '20k', 'half', '25k', '30k', '35k', '40k',
'finish'])
berlin_results = ber14m.append([ber14w, ber15m, ber15w, ber16m, ber16w, ber17m, ber17w], ignore_index=True)
berlin_results['city'] = None
berlin_results['state'] = None
berlin_results['host_city'] = 'Berlin'
return berlin_results
def process_all_data():
"""
Method to import, transform, and combine all Marathon data.
:return: DataFrame of all transformed and combined Marathon data.
:rtype: pandas.DataFrame
"""
boston_results = process_boston_data()
nyc_results = process_nyc_data()
chicago_results = process_chicago_data()
london_results = process_london_data()
berlin_results = process_berlin_data()
dashathon_data = boston_results.append([nyc_results, chicago_results], ignore_index=True)
london_berlin_results = london_results.append(berlin_results, ignore_index=True)
london_berlin_results['age'] = np.nan  # pd.np is deprecated and removed in newer pandas
london_berlin_results['age_bucket'] = None
london_berlin_results['citizen'] = None
london_berlin_results['division'] = None
london_berlin_results['genderdiv'] = None
london_berlin_results['name'] = None
london_berlin_results['official_time'] = None
london_berlin_results['pace'] = None
london_berlin_results = london_berlin_results.rename(columns={'age_group': 'age_range',
'rank_gender': 'gender_place'})
london_berlin_results = london_berlin_results.drop(['rank_age_group', 'finish'], axis=1)
dashathon_data = dashathon_data.append(london_berlin_results, ignore_index=True)
# Aggregated changes
dashathon_data['genderdiv'] = dashathon_data['genderdiv'].astype('float64')
dashathon_data['official_time'] = dashathon_data['official_time'].astype('float64')
# Dropping age_bucket
dashathon_data = dashathon_data.drop(columns=['age_bucket'])
# Creating a new age_range column on basis of age
dashathon_data = dashathon_data.drop(columns={'age_range'})
age_map = pd.read_csv('dashathon/merging/age_map.csv')
dashathon_data = pd.merge(dashathon_data, age_map, on='age', how='left')
# Make gender consistent across all datasets
dashathon_data['gender'] = dashathon_data['gender'].replace('W', 'F')
# Dropping pace column for now
dashathon_data = dashathon_data.drop(columns={'pace'})
# Converting three letter country names to full country names
country_code =
|
pd.read_csv('dashathon/merging/country_code_web.csv', usecols=['country', 'code'], encoding='latin-1')
|
pandas.read_csv
|
"""
analys module implements analysis functions related to Bioinformatics, Statistics, and Machine learning:
High-throughput Bioinformatics data analysis
Bioinformatics file handling and parsing
Molecular marker analysis
Bioinformatics file format conversions
Biostatistical analysis
Functional enrichment analysis (GenFam)
Importing defined datasets
"""
import pandas as pd
import re
import os
import numpy as np
from bioinfokit.visuz import general
from bioinfokit import visuz
from itertools import groupby, chain
import sys
import csv
import scipy.stats as stats
from tabulate import tabulate
from statsmodels.stats.multitest import multipletests
from textwrap3 import wrap
from statsmodels.formula.api import ols
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from decimal import Decimal
from pathlib import Path
from collections import defaultdict
from shutil import which
from subprocess import check_output, STDOUT, CalledProcessError
from statsmodels.stats.libqsturng import psturng, qsturng
import collections
import glob
__all__ = ['Fasta', 'HtsAna', 'fastq', 'analys_general', 'marker', 'format', 'stat', 'gff', 'norm', 'assembly', 'lncrna',
'genfam', 'anot', 'get_data']
def tcsv(file="tab_file"):
tab_file = csv.reader(open(file, 'r'), dialect=csv.excel_tab)
csv_file = csv.writer(open('out.csv', 'w', newline=''), dialect=csv.excel)
for record in tab_file:
csv_file.writerow(record)
class Fasta:
def __init__(self):
pass
# adapted from https://www.biostars.org/p/710/
@staticmethod
def fasta_reader(file="fasta_file"):
read_file = open(file, "rU")
fasta_iter = (rec[1] for rec in groupby(read_file, lambda line: line[0] == ">"))
for record in fasta_iter:
fasta_header = record.__next__()[1:].strip()
fasta_header = re.split("\s+", fasta_header)[0]
seq = "".join(s.strip() for s in fasta_iter.__next__())
yield fasta_header, seq
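# Illustrative usage of the generator (a sketch; 'example.fasta' is a hypothetical file):
#   >>> for header, seq in Fasta.fasta_reader('example.fasta'):
#   ...     print(header, len(seq))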
@staticmethod
def rev_com(seq=None, file=None):
if seq is not None:
rev_seq = seq[::-1]
rev_seq = rev_seq.translate(str.maketrans("ATGCUN", "TACGAN"))
return rev_seq
elif file is not None:
out_file = open("output_revcom.fasta", 'w')
fasta_iter = Fasta.fasta_reader(file)
for record in fasta_iter:
fasta_header, seq = record
rev_seq = seq[::-1]
rev_seq = rev_seq.translate(str.maketrans("ATGCUN", "TACGAN"))
out_file.write(">" + fasta_header + "\n" + rev_seq + "\n")
out_file.close()
@staticmethod
def ext_subseq(file="fasta_file", id="chr", st="start", end="end", strand="plus"):
fasta_iter = Fasta.fasta_reader(file)
for record in fasta_iter:
fasta_header, seq = record
if id == fasta_header.strip() and strand == "plus":
# -1 is necessary as it counts from 0
sub_seq = seq[int(st - 1):int(end)]
print(sub_seq)
elif id == fasta_header.strip() and strand == "minus":
sub_seq = seq[int(st - 1):int(end)]
sub_seq_rc = Fasta.rev_com(seq=sub_seq)
print(sub_seq_rc)
@staticmethod
def extract_seq(file='fasta_file', id='id_file_or_pd_dataframe'):
# extract seq from fasta file based on id match
if isinstance(id, pd.Series):
id_list = list(id)
else:
id_list = []
id_file = open(id, "rU")
for line in id_file:
id_name = line.rstrip('\n')
id_list.append(id_name)
out_file = open("output.fasta", 'w')
list_len = len(id_list)
value = [1] * list_len
# id_list converted to dict for faster search
dict_list = dict(zip(id_list, value))
fasta_iter = Fasta.fasta_reader(file)
for record in fasta_iter:
fasta_header, seq = record
if fasta_header.strip() in dict_list.keys():
out_file.write(">" + fasta_header + "\n" + '\n'.join(wrap(seq, 60)) + "\n")
out_file.close()
if not isinstance(id, pd.Series):
id_file.close()
@staticmethod
def extract_seq_nomatch(file="fasta_file", id="id_file"):
# remove seqs which match to ids in id file
if isinstance(id, pd.Series):
id_list = list(id)
else:
id_list = []
id_file = open(id, "rU")
for line in id_file:
id_name = line.rstrip('\n')
id_list.append(id_name)
out_file = open("output.fasta", 'w')
list_len = len(id_list)
value = [1] * list_len
# id_list converted to dict for faster search
dict_list = dict(zip(id_list, value))
fasta_iter = Fasta.fasta_reader(file)
for record in fasta_iter:
fasta_header, seq = record
if fasta_header.strip() not in dict_list.keys():
out_file.write(">" + fasta_header + "\n" + seq + "\n")
out_file.close()
if not isinstance(id, pd.Series):
id_file.close()
@staticmethod
def split_fasta(file="fasta_file", n=2, bases_per_line=60):
seq_ids = []
fasta_iter = Fasta.fasta_reader(file)
for record in fasta_iter:
header, seq = record
seq_ids.append(header)
split_ids_list = np.array_split(seq_ids, n)
for ind, i in enumerate(split_ids_list):
out_file = open('output_'+str(ind)+'.fasta', 'w')
value = [1] * len(i)
dict_list = dict(zip(i, value))
fasta_iter = Fasta.fasta_reader(file)
for record in fasta_iter:
fasta_header, seq = record
if fasta_header.strip() in dict_list.keys():
out_file.write(">" + fasta_header + "\n" + '\n'.join(wrap(seq, bases_per_line)) + "\n")
out_file.close()
@staticmethod
def split_seq(seq=None, seq_size=3, seq_overlap=True, any_cond=False, outfmt='list'):
"""
Split a nucleotide sequence into smaller chunks
Parameters
seq: Nucleotide sequence to split
seq_size: Sequence chunk size
seq_overlap: Split sequence in overlap mode
any_cond: any conditions for splitting; not yet defined
outfmt: Split sequence output format ('list' or 'fasta') [default: list]
"""
if outfmt not in ['list', 'fasta']:
raise ValueError('Invalid value for outfmt')
if seq is None:
raise ValueError('Provide the input sequence')
chunk_counter = 1
temp_chunks = []
if seq_overlap:
seq_chunks = [seq[i:i+seq_size] for i in range(0, len(seq), seq_size-(seq_size-1))]
if any_cond:
for s in seq_chunks:
if s[-1] != 'G':
temp_chunks.append(s[:-1])
else:
seq_chunks = [seq[i:i+seq_size] for i in range(0, len(seq), seq_size)]
if any_cond:
seq_chunks = temp_chunks
seq_size = seq_size-1
if outfmt == 'fasta':
out_fasta_file = open('output_chunks.fasta', 'w')
for s in seq_chunks:
if len(s) == seq_size:
out_fasta_file.write(">" + str(chunk_counter) + "\n" + '\n'.join(wrap(s, 60)) + "\n")
chunk_counter += 1
elif outfmt == 'list':
print([s for s in seq_chunks if len(s)==seq_size])
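# Illustrative usage of split_seq (a sketch): in the default overlap mode the window slides
# by one base, and only chunks of the full seq_size are reported.
#   >>> Fasta.split_seq(seq='ATGCAT', seq_size=3, outfmt='list')
#   ['ATG', 'TGC', 'GCA', 'CAT']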
class fastq:
def __init__(self):
pass
def fastq_reader(file="fastq_file"):
fastq_file = open(file, "r")
for line in fastq_file:
header_1 = line.rstrip()
read = next(fastq_file).rstrip()
header_2 = next(fastq_file).rstrip()
read_qual_asc = next(fastq_file).rstrip()
yield header_1, read, header_2, read_qual_asc
def fqreadcounter(file="fastq_file"):
read_file = open(file, "rU")
num_lines = 0
total_len = 0
for line in read_file:
num_lines += 1
header_1 = line.rstrip()
read = next(read_file).rstrip()
len_read = len(read)
total_len += len_read
header_2 = next(read_file).rstrip()
read_qual = next(read_file).rstrip()
read_file.close()
num_reads = num_lines / 4
return num_reads, total_len
def fastq_format_check(file="fastq_file"):
read_file = open(file, 'r')
x = 0
for line in read_file:
header = line.rstrip()
if not header.startswith('@'):
x = 1
else:
x = 0
break
return x
def detect_fastq_variant(file="fastq_file"):
count = 0
check = []
fastq_file = open(file, 'rU')
for line in fastq_file:
header_1 = line.rstrip()
read = next(fastq_file).rstrip()
header_2 = next(fastq_file).rstrip()
read_qual_asc = next(fastq_file).rstrip()
asc_list = list(read_qual_asc)
asc_list = list(map(ord, asc_list))
min_q = min(asc_list)
max_q = max(asc_list)
check.append(min_q)
check.append(max_q)
count += 1
if count == 40000:
break
fastq_file.close()
min_q = min(check)
max_q = max(check)
if 64 > min_q >= 33 and max_q == 74:
return 1
elif min_q >= 64 and 74 < max_q <= 104:
return 2
elif 64 > min_q >= 33 and max_q <= 73:
return 3
def split_fastq(file="fastq_file"):
x = fastq.fastq_format_check(file)
if x == 1:
print("Error: Sequences are not in sanger fastq format")
sys.exit(1)
fastq_iter = fastq.fastq_reader(file)
out_file_name_1 = open(Path(file).stem+'_1.fastq', 'w')
out_file_name_2 = open(Path(file).stem+'_2.fastq', 'w')
i = 1
for record in fastq_iter:
header_1, read, header_2, read_qual_asc = record
if (i % 2) == 0:
out_file_name_2.write(header_1+'\n'+read+'\n'+header_2+'\n'+read_qual_asc+'\n')
else:
out_file_name_1.write(header_1+'\n'+read+'\n'+header_2+'\n'+read_qual_asc+'\n')
i += 1
out_file_name_1.close()
out_file_name_2.close()
def sra_bd(file='sra_list_in_file', paired=False, prog='fasterq-dump', t=4, other_opts=None):
if which(prog) is None:
raise Exception(prog + ' does not exist. Please install sra toolkit and add to system path')
if prog not in 'fasterq-dump':
raise Exception('Only fasterq-dump program supported')
read_f = open(file, 'r')
for sra in read_f:
print('Downloading ' + sra.strip() + '\n')
if paired:
try:
if other_opts:
cmd = [prog, '-e', str(t), '--split-files']
cmd.extend(other_opts.split())
cmd.extend([sra.strip()])  # wrap in a list; extending with a bare str adds one character at a time
check_output(cmd, stderr=STDOUT)
else:
check_output([prog, '-e', str(t), '--split-files', sra.strip()], stderr=STDOUT)
except CalledProcessError as e:
print('Error: there is something wrong with the subprocess command or input fastq already '
'available\n See detailed error \n')
print(e.returncode, e.output, '\n')
else:
try:
if other_opts:
cmd = [prog, '-e', str(t)]
cmd.extend(other_opts.split())
cmd.extend([sra.strip()])
check_output(cmd, stderr=STDOUT)
else:
check_output([prog, '-e', str(t), sra.strip()], stderr=STDOUT)
except CalledProcessError as e:
print('Error: there is something wrong with the subprocess command or input fastq already '
'available\n See detailed error \n')
print(e.returncode, e.output, '\n')
read_f.close()
def seqcov(file="fastq_file", gs="genome_size"):
x = fastq.fastq_format_check(file)
if x == 1:
print("Error: Sequences are not in fastq format")
sys.exit(1)
num_reads, total_len = fastq.fqreadcounter(file)
# haploid genome size (gs) must be given in Mbp; convert to bp
gs = gs * 1e6
cov = round(float(total_len / gs), 2)
print("Sequence coverage for", file, "is", cov)
class analys_general:
@staticmethod
def keep_uniq(_list):
uniq_list = []
for ele in _list:
if ele not in uniq_list:
uniq_list.append(ele)
return uniq_list
@staticmethod
def get_list_from_df(df=None, xfac_var=None, res_var=None, funct=None):
group_list = []
mult_group = dict()
mult_group_count = dict()
df_counts = 0
sample_size_r = None
if isinstance(xfac_var, list) and len(xfac_var) == 2:
# exclude if same group provided multiple times
xfac_var = analys_general.keep_uniq(xfac_var)
levels1 = df[xfac_var[0]].unique()
levels2 = df[xfac_var[1]].unique()
sample_size_r = len(levels1) * len(levels2)
for ele1 in levels1:
for ele2 in levels2:
if funct == 'get_list':
group_list.append(list(df[(df[xfac_var[0]] == ele1) & (df[xfac_var[1]] == ele2)][res_var]))
df_counts += 1
elif funct == 'get_dict':
mult_group[(ele1, ele2)] = df[(df[xfac_var[0]] == ele1) &
(df[xfac_var[1]] == ele2)].mean().loc[res_var]
mult_group_count[(ele1, ele2)] = df[(df[xfac_var[0]] == ele1) &
(df[xfac_var[1]] == ele2)].shape[0]
elif isinstance(xfac_var, list) and len(xfac_var) == 3:
# exclude if same group provided multiple times
xfac_var = analys_general.keep_uniq(xfac_var)
levels1 = df[xfac_var[0]].unique()
levels2 = df[xfac_var[1]].unique()
levels3 = df[xfac_var[2]].unique()
sample_size_r = len(levels1) * len(levels2) * len(levels3)
for ele1 in levels1:
for ele2 in levels2:
for ele3 in levels3:
if funct == 'get_list':
group_list.append(list(df[(df[xfac_var[0]] == ele1) & (df[xfac_var[1]] == ele2) &
(df[xfac_var[2]] == ele3)][res_var]))
df_counts += 1
elif funct == 'get_dict':
mult_group[(ele1, ele2, ele3)] = df[(df[xfac_var[0]] == ele1) & (df[xfac_var[1]] == ele2) &
(df[xfac_var[2]] == ele3)].mean().loc[res_var]
mult_group_count[(ele1, ele2, ele3)] = df[(df[xfac_var[0]] == ele1) &
(df[xfac_var[1]] == ele2) &
(df[xfac_var[2]] == ele3)].shape[0]
elif isinstance(xfac_var, str):
levels = df[xfac_var].unique()
sample_size_r = len(levels)
for ele in levels:
if funct == 'get_list':
group_list.append(list(df[df[xfac_var] == ele][res_var]))
df_counts += 1
elif funct == 'get_dict':
mult_group[ele] = df[df[xfac_var] == ele].mean().loc[res_var]
mult_group_count[ele] = df[df[xfac_var] == ele].shape[0]
elif isinstance(xfac_var, list) and len(xfac_var) > 3:
raise Exception('Only three factors supported')
if funct == 'get_list':
return group_list, df_counts
elif funct == 'get_dict':
return mult_group, mult_group_count, sample_size_r
class marker:
def __init__(self):
pass
def mergevcf(file="vcf_file_com_sep"):
print('mergevcf renamed to concatvcf')
def concatvcf(file="vcf_file_com_sep"):
vcf_files = file.split(",")
merge_vcf = open("concat_vcf.vcf", "w+")
file_count = 0
print("concatenating vcf files...")
for f in vcf_files:
if file_count == 0:
read_file = open(f, "rU")
for line in read_file:
merge_vcf.write(line)
read_file.close()
elif file_count > 0:
read_file = open(f, "rU")
for line in read_file:
if not line.startswith("#"):
merge_vcf.write(line)
read_file.close()
file_count += 1
merge_vcf.close()
def splitvcf(file='vcf_file', id='#CHROM'):
read_vcf_file = open(file, 'r')
info_lines, headers = [], []
for line in read_vcf_file:
if line.startswith(id):
headers = line.strip().split('\t')
elif line.startswith('##'):
info_lines.append(line.strip())
read_vcf_file.close()
assert len(headers) != 0, "Non matching id parameter"
read_vcf_file_df = pd.read_csv(file, sep='\t', comment='#', header=None)
read_vcf_file_df.columns = headers
chrom_ids = read_vcf_file_df[id].unique()
for r in range(len(chrom_ids)):
sub_df = read_vcf_file_df[read_vcf_file_df[id]==chrom_ids[r]]
# out_vcf_file = open(chrom_ids[r]+'.vcf'
with open(chrom_ids[r]+'.vcf', 'w') as out_vcf_file:
for l in info_lines:
out_vcf_file.write(l+'\n')
sub_df.to_csv(chrom_ids[r]+'.vcf', mode='a', sep='\t', index=False)
out_vcf_file.close()
def vcfreader(file='vcf_file', id='#CHROM'):
read_vcf_file = open(file, 'r')
info_lines, headers = [], []
for line in read_vcf_file:
if line.startswith(id):
headers = line.strip().split('\t')
elif line.startswith('##'):
info_lines.append(line.strip())
else:
var_lines = line.strip().split('\t')
yield headers, info_lines, var_lines
read_vcf_file.close()
assert len(headers) != 0, "Non matching id parameter"
def vcf_anot(file='vcf_file', gff_file='gff_file', id='#CHROM', anot_attr=None):
gff_iter = gff.gffreader(gff_file)
gene_cord = defaultdict(list)
cds_cord = defaultdict(list)
exon_cord = defaultdict(list)
ftr_cord = defaultdict(list)
ttr_cord = defaultdict(list)
sc_cord = defaultdict(list)
st_cord = defaultdict(list)
igenic_cord = defaultdict(list)
intragenic_cord = defaultdict(list)
# also for introns between the exons
intragenic_cord_exon = defaultdict(list)
gene_id_dict = dict()
transcript_name_dict = dict()
transcript_strand_dict = dict()
chr_list = set([])
for record in gff_iter:
chr, gene_id, gene_name, transcript_id, source, feature_type, st, ende, strand, attr = record
if feature_type == 'gene':
if chr not in chr_list:
gene_number_1 = 1
chr_list.add(chr)
gene_cord[(chr, gene_id, gene_number_1)]=[st, ende]
gene_id_dict[(chr, gene_number_1)] = gene_id
gene_number_1 += 1
elif feature_type == 'mRNA' or feature_type == 'transcript':
cds_cord[(chr, transcript_id)] = []
exon_cord[(chr, transcript_id)] = []
ftr_cord[transcript_id] = []
ttr_cord[transcript_id] = []
sc_cord[transcript_id] = []
st_cord[transcript_id] = []
transcript_strand_dict[transcript_id] = strand
if anot_attr:
transcript_name_dict[transcript_id] = re.search(anot_attr+'=(.+?)(;|$)', attr).group(1)
elif feature_type == 'CDS':
cds_cord[(chr, transcript_id)].append([st, ende])
elif feature_type == 'exon':
exon_cord[(chr, transcript_id)].append([st, ende])
elif feature_type == 'five_prime_UTR':
ftr_cord[(chr, transcript_id)].append([st, ende])
elif feature_type == 'three_prime_UTR':
ttr_cord[(chr, transcript_id)].append([st, ende])
elif feature_type == 'start_codon':
sc_cord[(chr, transcript_id)].append([st, ende])
elif feature_type == 'stop_codon':
st_cord[(chr, transcript_id)].append([st, ende])
# get intergenic regions
for gene, cord in gene_cord.items():
chr, gene_id, gene_number = gene[0], gene[1], gene[2]
for x in chr_list:
if x == chr and gene_number == 1:
igenic_cord[(chr, gene_id)] = [1, int(cord[0])-1]
elif x == chr and gene_number != 1:
igenic_cord[(chr, gene_id)] = \
[int(gene_cord[(chr, gene_id_dict[(chr, int(gene_number)-1)], int(gene_number)-1)][1])+1, int(cord[0])-1]
# get intragenic regions based on CDS
for transcript, cord in cds_cord.items():
chr, transcript_id = transcript[0], transcript[1]
intragenic_cord[(chr, transcript_id)] = []
for x in chr_list:
if x == chr:
cord.sort(key=lambda k: k[0])
if len(cord) > 1:
for y in range(len(cord)-1):
intragenic_cord[(chr, transcript_id)].append([int(cord[y][1])+1, int(cord[y+1][0])-1])
# get intragenic regions based on exon
for transcript, cord in exon_cord.items():
chr, transcript_id = transcript[0], transcript[1]
intragenic_cord_exon[(chr, transcript_id)] = []
for x in chr_list:
if x == chr:
cord.sort(key=lambda k: k[0])
if len(cord) > 1:
for y in range(len(cord) - 1):
intragenic_cord_exon[(chr, transcript_id)].append([int(cord[y][1]) + 1, int(cord[y + 1][0]) - 1])
def var_region_check(_dict, _chr, _region, _anot_attr, _transcript_name_dict, _var_region, _transcript_name,
_transcript_id, _transcript_strand):
for transcript, cord in _dict.items():
for i in range(len(cord)):
if transcript[0] == chr and int(cord[i][0]) <= int(var_pos) <= int(cord[i][1]):
_var_region = _region
_transcript_id = transcript[1]
_transcript_strand = transcript_strand_dict[_transcript_id]
if anot_attr:
_transcript_name = transcript_name_dict[_transcript_id]
break
if _var_region:
break
return _var_region, _transcript_name, _transcript_id, _transcript_strand
vcf_iter = marker.vcfreader(file, id)
try:
# os.remove(Path(file).stem+'_anot.vcf')
os.remove(Path(file).stem + '_anot.txt')
except OSError:
pass
# vcf_out_anot = open(Path(file).stem+'_anot.vcf', 'a')
vcf_out_anot = open(Path(file).stem + '_anot.txt', 'a')
for_info_lines = 1
transcript_id = None
transcript_name = None
transcript_strand = None
transcript_name_return = transcript_name
transcript_id_return = transcript_id
transcript_strand_return = transcript_strand
for record in vcf_iter:
headers, info_lines, chr, var_pos = record[0], record[1], record[2][0], record[2][1]
if for_info_lines == 1:
for_info_lines = 0
# for l in info_lines:
# vcf_out_anot.write(l+'\n')
headers.extend(['genomic region', 'transcript ID', 'transcript name', 'strand'])
vcf_out_anot.write('\t'.join(x for x in headers) + '\n')
var_region = None
if var_region is None:
for transcript, cord in igenic_cord.items():
if transcript[0] == chr and int(cord[0]) <= int(var_pos) <= int(cord[1]):
var_region = 'Intergenic'
transcript_id_return = None
transcript_strand_return = None
if anot_attr:
transcript_name_return = None
break
if var_region is None:
var_region, transcript_name_return, transcript_id_return, transcript_strand_return = var_region_check(cds_cord, chr, 'CDS',
anot_attr, transcript_name_dict, var_region, transcript_name, transcript_id, transcript_strand)
if var_region is None:
var_region, transcript_name_return, transcript_id_return, transcript_strand_return = var_region_check(ftr_cord, chr,
'five_prime_UTR', anot_attr, transcript_name_dict, var_region, transcript_name, transcript_id, transcript_strand)
if var_region is None:
var_region, transcript_name_return, transcript_id_return, transcript_strand_return = var_region_check(ttr_cord, chr,
'three_prime_UTR', anot_attr, transcript_name_dict, var_region, transcript_name, transcript_id, transcript_strand)
if var_region is None:
var_region, transcript_name_return, transcript_id_return, transcript_strand_return = var_region_check(sc_cord, chr,
'start_codon', anot_attr, transcript_name_dict, var_region, transcript_name, transcript_id, transcript_strand)
if var_region is None:
var_region, transcript_name_return, transcript_id_return, transcript_strand_return = var_region_check(st_cord, chr,
'stop_codon', anot_attr, transcript_name_dict, var_region, transcript_name, transcript_id, transcript_strand)
if var_region is None:
var_region, transcript_name_return, transcript_id_return, transcript_strand_return = var_region_check(exon_cord, chr,
'exon', anot_attr, transcript_name_dict, var_region, transcript_name, transcript_id, transcript_strand)
if var_region is None:
for transcript, cord in intragenic_cord.items():
transcript_strand_return = transcript_strand_dict[transcript[1]]
transcript_id_return = transcript[1]
if len(cord) >= 1:
for i in range(len(cord)):
if transcript[0] == chr and int(cord[i][0]) <= int(var_pos) <= int(cord[i][1]):
var_region = 'Introns'
break
if var_region:
break
if var_region is None:
for transcript, cord in intragenic_cord_exon.items():
transcript_strand_return = transcript_strand_dict[transcript[1]]
transcript_id_return = transcript[1]
if len(cord) >= 1:
for i in range(len(cord)):
if transcript[0] == chr and int(cord[i][0]) <= int(var_pos) <= int(cord[i][1]):
var_region = 'Introns'
break
if var_region:
break
vcf_out_anot.write('\t'.join(str(x) for x in record[2])+'\t'+str(var_region)+'\t'+str(transcript_id_return)+
'\t'+str(transcript_name_return)+'\t'+str(transcript_strand_return)+'\n')
class format:
def __init__(self):
pass
def fqtofa(file="fastq_file"):
x = fastq.fastq_format_check(file)
if x == 1:
print("Error: Sequences are not in sanger fastq format")
sys.exit(1)
read_file = open(file, "rU")
out_file = open("output.fasta", 'w')
for line in read_file:
header_1 = line.rstrip()
read = next(read_file).rstrip()
header_2 = next(read_file).rstrip()
read_qual = next(read_file).rstrip()
out_file.write(header_1+"\n"+'\n'.join(wrap(read, 60))+"\n")
read_file.close()
def tabtocsv(file="tab_file"):
tab_file = csv.reader(open(file, 'r'), dialect=csv.excel_tab)
csv_file = csv.writer(open('output.csv', 'w', newline=''), dialect=csv.excel)
for record in tab_file:
csv_file.writerow(record)
def csvtotab(file="csv_file"):
csv_file = csv.reader(open(file, 'r'), dialect=csv.excel)
tab_file = csv.writer(open('output.txt', 'w', newline=''), dialect=csv.excel_tab)
for record in csv_file:
tab_file.writerow(record)
def hmmtocsv(file="hmm_file"):
hmm_file = open(file, "rU")
csv_file = open("ouput_hmm.csv", "w")
for line in hmm_file:
line = line.strip()
if not line.startswith("#"):
data = re.split(' +', line)
if len(data) == 19:
data[18] = data[18].replace(',', ' ')
csv_file.write(str.join(',', data))
csv_file.write("\n")
elif len(data) > 19:
ele = list(range(18, len(data)))
data[18] = " ".join([e for i, e in enumerate(data) if i in ele])
data[18] = data[18].replace(',', '')
csv_file.write(str.join(',', data[0:19]))
csv_file.write("\n")
hmm_file.close()
csv_file.close()
# find sanger fastq phred quality encoding format
def fq_qual_var(file=None):
if file is None:
print("Error: No sanger fastq file provided")
sys.exit(1)
x = fastq.fastq_format_check(file)
if x == 1:
print("Error: Sequences are not in sanger fastq format")
sys.exit(1)
qual_format = fastq.detect_fastq_variant(file)
if qual_format == 1:
print("The fastq quality format is illumina 1.8+ (Offset +33)")
elif qual_format == 2:
print("The fastq quality format is illumina 1.3/1.4 (Offset +64)")
elif qual_format == 3:
print("The fastq quality format is Sanger (Offset +33)")
else:
print("\nError: Wrong quality format\n")
sys.exit(1)
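# Illustrative usage sketch for the format helpers above (file names are placeholders;
# each converter writes to the fixed output name hard-coded in its method):
# format.fqtofa(file='reads.fastq')        # writes output.fasta
# format.tabtocsv(file='table.txt')        # writes output.csv
# format.csvtotab(file='table.csv')        # writes output.txt
# format.fq_qual_var(file='reads.fastq')   # prints the detected quality encoding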
class HtsAna:
def __init__(self):
pass
@staticmethod
def merge_featureCount(pattern='*.txt', gene_column_name='Geneid'):
count_files = glob.glob(pattern)
iter = 0
for f in count_files:
df = pd.read_csv(f, sep='\t', comment='#')
if iter == 0:
df_count_mat = df.iloc[:, [0, 6]]
iter += 1
elif iter > 0:
df_temp = df.iloc[:, [0, 6]]
df_count_mat = pd.merge(df_count_mat, df_temp, how='left', on=gene_column_name)
df_count_mat.to_csv('gene_matrix_count.csv', index=False)
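# Illustrative usage sketch (assumed file layout): with several featureCounts outputs
# matching *.txt in the working directory, the call below joins their Geneid and count
# columns and writes gene_matrix_count.csv.
# HtsAna.merge_featureCount(pattern='*.txt', gene_column_name='Geneid')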
class stat:
def __init__(self):
self.anova_summary = None
self.data_summary = None
self.tukey_summary = None
self.tukey_groups = None
# unstack single factor
self.unstack_df = None
# chi square
self.expected_df = None
        self.summary = None
self.bartlett_summary = None
self.anova_model_out = None
self.levene_summary = None
self.anova_std_residuals = None
self.reg_metric_df = None
self.bin_dict = None
@staticmethod
def _data_summary(df='dataframe', xfac_var=None, res_var=None):
data_summary_dict = dict()
data_summary_dict['Group'] = []
data_summary_dict['Count'] = []
data_summary_dict['Mean'] = []
data_summary_dict['Std Dev'] = []
data_summary_dict['Min'] = []
data_summary_dict['25%'] = []
data_summary_dict['50%'] = []
data_summary_dict['75%'] = []
data_summary_dict['Max'] = []
levels = df[xfac_var].unique()
for i in levels:
temp = df.loc[df[xfac_var] == i, res_var]
data_summary_dict['Group'].append(i)
data_summary_dict['Count'].append(temp.describe().to_numpy()[0])
data_summary_dict['Mean'].append(temp.describe().to_numpy()[1])
data_summary_dict['Std Dev'].append(temp.describe().to_numpy()[2])
data_summary_dict['Min'].append(temp.describe().to_numpy()[3])
data_summary_dict['25%'].append(temp.describe().to_numpy()[4])
data_summary_dict['50%'].append(temp.describe().to_numpy()[5])
data_summary_dict['75%'].append(temp.describe().to_numpy()[6])
data_summary_dict['Max'].append(temp.describe().to_numpy()[7])
return pd.DataFrame(data_summary_dict)
def bartlett(self, df=None, xfac_var=None, res_var=None):
# get bartlett test from stacked dataframe
df = df.dropna()
if xfac_var is None or res_var is None:
raise ValueError('Invalid value for xfac_var or res_var')
group_list, df_counts = analys_general.get_list_from_df(df, xfac_var, res_var, 'get_list')
test_stat, p = stats.bartlett(*group_list)
df_counts = df_counts-1
self.bartlett_summary = pd.DataFrame({'Parameter':
['Test statistics (T)', 'Degrees of freedom (Df)', 'p value'],
'Value': [test_stat, df_counts, p]}).round(4)
def levene(self, df=None, xfac_var=None, res_var=None, center='median'):
# get bartlett test from stacked dataframe
df = df.dropna()
if xfac_var is None or res_var is None:
raise ValueError('Invalid value for xfac_var or res_var')
group_list, df_counts = analys_general.get_list_from_df(df, xfac_var, res_var, 'get_list')
test_stat, p = stats.levene(*group_list, center=center)
df_counts = df_counts-1
self.levene_summary = pd.DataFrame({'Parameter':
['Test statistics (W)', 'Degrees of freedom (Df)', 'p value'],
'Value': [test_stat, df_counts, p]}).round(4)
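    # Illustrative usage sketch for the variance-homogeneity tests above; the column
    # names 'treatment' and 'yield' are assumptions about the user's stacked dataframe.
    # res = stat()
    # res.bartlett(df=df, xfac_var='treatment', res_var='yield'); print(res.bartlett_summary)
    # res.levene(df=df, xfac_var='treatment', res_var='yield'); print(res.levene_summary)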
def tukey_hsd(self, df="dataframe", res_var=None, xfac_var=None, anova_model=None, phalpha=0.05, ss_typ=2):
df = df.dropna()
if xfac_var is None or anova_model is None or res_var is None:
raise ValueError('Invalid value for xfac_var or anova_model or res_var')
if ss_typ not in [1, 2, 3]:
raise ValueError('Invalid SS type')
tukey_phoc = dict()
tukey_phoc['group1'] = []
tukey_phoc['group2'] = []
tukey_phoc['Diff'] = []
tukey_phoc['Lower'] = []
tukey_phoc['Upper'] = []
tukey_phoc['q-value'] = []
tukey_phoc['p-value'] = []
# group_letter = dict()
group_pval = dict()
# group_let = dict()
# share_let = dict()
mult_group, mult_group_count, sample_size_r = analys_general.get_list_from_df(df, xfac_var, res_var, 'get_dict')
# self.anova_stat(df, res_var, anova_xfac_var)
self.anova_stat(df, anova_model, ss_typ)
df_res = self.anova_summary.df.Residual
mse = self.anova_summary.sum_sq.Residual / df_res
# self.data_summary = stat._data_summary(df, xfac_var, res_var)
# q critical
q_crit = qsturng(1 - phalpha, sample_size_r, df_res)
        # t critical: t_crit = q_crit / sqrt(2)
# t_crit = q_crit / np.sqrt(2)
# tuke_hsd_crit = q_crit * np.sqrt(mse / len(levels))
# let_num = 97
# let_num_list = []
# sharing_letter = dict()
comp_pairs = [(ele1, ele2) for i, ele1 in enumerate(list(mult_group)) for ele2 in list(mult_group)[i + 1:]]
for p in comp_pairs:
mean_diff = max(mult_group[p[0]], mult_group[p[1]]) - min(mult_group[p[0]], mult_group[p[1]])
# count for groups; this is useful when sample size not equal -- Tukey-Kramer
group1_count, group2_count = mult_group_count[p[0]], mult_group_count[p[1]]
# https://www.uvm.edu/~statdhtx/StatPages/MultipleComparisons/unequal_ns_and_mult_comp.html
# also for considering unequal sample size
mse_factor = np.sqrt(np.divide(mse, group1_count) + np.divide(mse, group2_count))
q_val = mean_diff / np.divide(mse_factor, np.sqrt(2))
tukey_phoc['group1'].append(p[0])
tukey_phoc['group2'].append(p[1])
tukey_phoc['Diff'].append(mean_diff)
# when equal sample size
tukey_phoc['Lower'].append(mean_diff - (q_crit * np.sqrt(np.divide(mse, 2) *
(np.divide(1, group1_count) +
np.divide(1, group2_count)))))
tukey_phoc['Upper'].append(mean_diff + (q_crit * np.sqrt(np.divide(mse, 2) *
(np.divide(1, group1_count) +
np.divide(1, group2_count)))))
# tukey_phoc['Significant'].append(np.abs(mean_diff) > tuke_hsd_crit)
# t test related to qvalue as q = sqrt(2) t
# ref https://www.real-statistics.com/one-way-analysis-of-variance-anova/unplanned-comparisons/tukey-hsd/
tukey_phoc['q-value'].append(q_val)
if isinstance(psturng(np.abs(q_val), sample_size_r, df_res), np.ndarray):
group_pval[(mult_group[p[0]], mult_group[p[1]])] = psturng(np.abs(q_val), sample_size_r, df_res)
tukey_phoc['p-value'].append(psturng(np.abs(q_val), sample_size_r, df_res)[0])
else:
group_pval[(mult_group[p[0]], mult_group[p[1]])] = psturng(np.abs(q_val), sample_size_r, df_res)
tukey_phoc['p-value'].append(psturng(np.abs(q_val), sample_size_r, df_res))
# group_letter_chars = {m: ''.join(list(map(chr, n))) for m, n in group_let.items()}
self.tukey_summary = pd.DataFrame(tukey_phoc)
# self.tukey_groups = pd.DataFrame({'': list(group_letter_chars.keys()), 'groups':
# list(group_letter_chars.values())})
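    # Illustrative usage sketch for Tukey HSD following a one-way ANOVA; the model string
    # and column names are assumptions about the user's stacked dataframe.
    # res = stat()
    # res.tukey_hsd(df=df, res_var='yield', xfac_var='treatment',
    #               anova_model='yield ~ C(treatment)')
    # print(res.anova_summary)
    # print(res.tukey_summary)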
def anova_stat(self, df="dataframe", anova_model=None, ss_typ=2, res_var=None, xfac_var=None):
# x = '{} ~ '
if res_var:
x = res_var + '~'
# y = 'C({})'
z = '+'
if isinstance(xfac_var, list) and len(xfac_var) > 1 and anova_model is None:
for i in range(len(xfac_var)):
x += 'C({})'.format(xfac_var[i])
if i < len(xfac_var) - 1:
x += z
model = ols(x, data=df).fit()
elif isinstance(xfac_var, str) and anova_model is None:
# create and run model
# model = ols('{} ~ C({})'.format(res_var, xfac_var), data=df).fit()
x += 'C({})'.format(xfac_var)
model = ols(x, data=df).fit()
elif anova_model:
model = ols(anova_model, data=df).fit()
else:
raise TypeError('xfac_var in anova must be string or list')
anova_table = sm.stats.anova_lm(model, typ=ss_typ)
self.anova_summary = anova_table
self.anova_summary.insert(loc=2, column='mean_sq', value=list(anova_table.sum_sq/anova_table.df))
column_names = ['df', 'sum_sq', 'mean_sq', 'F', 'PR(>F)']
self.anova_summary = self.anova_summary.reindex(columns=column_names)
# self.anova_summary = anova_table
self.anova_model_out = model
self.anova_std_residuals = model.resid / np.sqrt(self.anova_summary.loc['Residual', 'mean_sq'])
def lin_reg(self, df="dataframe", y=None, x=None):
df = df.dropna()
        assert x is not None and y is not None, "Provide proper column names for X and Y variables"
        assert isinstance(x, list) and isinstance(y, list), "X and Y column names should be lists"
# min data should be 4 or more
assert df.shape[0] >= 4, "Very few data"
self.X = df[x].to_numpy()
self.Y = df[y].to_numpy()
# number of independent variables
p = len(x)
# number of parameter estimates (+1 for intercept and slopes)
e = p+1
# number of samples/observations
n = len(df[y])
# run regression
reg_out = LinearRegression().fit(self.X, self.Y)
# coefficient of determination
r_sq = round(reg_out.score(self.X, self.Y), 4)
# Correlation coefficient (r)
# Adjusted r-Squared
r_sq_adj = round(1 - (1 - r_sq) * ((n - 1)/(n-p-1)), 4)
# RMSE
# RMSE = standard deviation of the residuals
rmse = round(np.sqrt(1-r_sq) * np.std(self.Y), 4)
# intercept and slopes
reg_intercept = reg_out.intercept_
reg_slopes = reg_out.coef_
# predicted values
self.y_hat = reg_out.predict(self.X)
# residuals
self.residuals = self.Y - self.y_hat
# sum of squares
regSS = np.sum((self.y_hat - np.mean(self.Y)) ** 2) # variation explained by linear model
residual_sse = np.sum((self.Y - self.y_hat) ** 2) # remaining variation
sst = np.sum((self.Y - np.mean(self.Y)) ** 2) # total variation
eq = ""
for i in range(p):
eq = eq+' + '+ '(' + str(round(reg_slopes[0][i], 4))+'*'+x[i] + ')'
self.reg_eq = str(round(reg_intercept[0], 4)) + eq
# variance and std error
# Residual variance = MSE and sqrt of MSE is res stnd error
sigma_sq_hat = round(residual_sse/(n-e), 4)
# residual std dev
        res_stdev = round(np.sqrt(sigma_sq_hat), 4)
# standardized residuals
self.std_residuals = self.residuals/res_stdev
# https://stackoverflow.com/questions/22381497/python-scikit-learn-linear-model-parameter-standard-error
# std error
        X_mat = np.empty(shape=(n, e), dtype=float)  # np.float alias removed in recent NumPy
X_mat[:, 0] = 1
X_mat[:, 1:e] = self.X
var_hat = np.linalg.inv(X_mat.T @ X_mat) * sigma_sq_hat
standard_error = []
for param in range(e):
standard_error.append(round(np.sqrt(var_hat[param, param]), 4))
# t = b1 / SE
params = list(chain(*[["Intercept"], x]))
estimates = list(chain(*[[reg_intercept[0]], reg_slopes[0]]))
tabulate_list = []
for param in range(e):
tabulate_list.append([params[param], estimates[param], standard_error[param],
estimates[param]/standard_error[param],
'%.4E' % Decimal(stats.t.sf(np.abs(estimates[param]/standard_error[param]), n-e)*2) ])
# anova
anova_table = []
anova_table.append(["Model", p, regSS, round(regSS/p, 4), round((regSS/p)/(residual_sse/(n-e)), 4),
'%.4E' % Decimal(stats.f.sf((regSS/p)/(residual_sse/(n-e)), p, n-e))])
anova_table.append(["Error", n-e, residual_sse, round(residual_sse/(n-e), 4), "", ""])
anova_table.append(["Total", n-1, sst, "", "", ""])
print("\nRegression equation:\n")
print(self.reg_eq)
print("\nRegression Summary:")
print(tabulate([["Dependent variables", x], ["Independent variables", y],
["Coefficient of determination (r-squared)", r_sq], ["Adjusted r-squared", r_sq_adj],
["Root Mean Square Error (RMSE)", rmse],
["Mean of Y", round(np.mean(self.Y), 4)], ["Residual standard error", round(np.sqrt(sigma_sq_hat), 4)],
["No. of Observations", n]], "\n"))
print("\nRegression Coefficients:\n")
print(tabulate(tabulate_list, headers=["Parameter", "Estimate", "Std Error", "t-value", "P-value Pr(>|t|)"]), "\n")
print("\nANOVA Summary:\n")
print(tabulate(anova_table, headers=["Source", "Df", "Sum Squares", "Mean Squares", "F", "Pr(>F)"]),
"\n")
# VIF for MLR
# VIF computed as regressing X on remaining X
# using correlation
if p > 1:
vif_table = []
vif_df = df[x]
df_corr = vif_df.corr()
vif_mat = np.linalg.inv(df_corr)
self.vif = vif_mat.diagonal()
for i in range(len(self.vif)):
vif_table.append([x[i], self.vif[i]])
print("\nVariance inflation factor (VIF)\n")
print(tabulate(vif_table, headers=["Variable", "VIF"]),
"\n")
def ttest(self, df='dataframe', xfac=None, res=None, evar=True, alpha=0.05, test_type=None, mu=None):
# drop NaN
df = df.dropna()
if df.shape[0] < 2:
raise Exception("Very few observations to run t-test")
if alpha < 0 or alpha > 1:
raise Exception("alpha value must be in between 0 and 1")
if test_type == 1:
            if res is None or mu is None:
raise ValueError("res or mu parameter value is missing")
if res not in df.columns:
raise ValueError("res column is not in dataframe")
a_val = df[res].to_numpy()
res_out = stats.ttest_1samp(a=a_val, popmean=mu, nan_policy='omit')
sem = df[res].sem()
if sem == 0:
print("\nWarning: the data is constant\n")
ci = (1 - alpha) * 100
tcritvar = stats.t.ppf((1 + (1 - alpha)) / 2, len(a_val)-1)
# print results
self.summary = "\nOne Sample t-test \n" + "\n" + \
tabulate([["Sample size", len(a_val)], ["Mean", df[res].mean()], ["t", res_out[0]],
["Df", len(a_val)-1], ["P-value (one-tail)", res_out[1]/2],
["P-value (two-tail)", res_out[1]],
["Lower " + str(ci) + "%", df[res].mean() - (tcritvar * sem)],
["Upper " + str(ci) + "%", df[res].mean() + (tcritvar * sem)]])
elif test_type == 2:
            if xfac is None or res is None:
raise Exception("xfac or res variable is missing")
if res not in df.columns or xfac not in df.columns:
raise ValueError("res or xfac column is not in dataframe")
levels = df[xfac].unique()
levels.sort()
if len(levels) != 2:
raise Exception("there must be only two levels")
a_val = df.loc[df[xfac] == levels[0], res].to_numpy()
b_val = df.loc[df[xfac] == levels[1], res].to_numpy()
a_count, b_count = len(a_val), len(b_val)
count = [a_count, b_count]
mean = [df.loc[df[xfac] == levels[0], res].mean(), df.loc[df[xfac] == levels[1], res].mean()]
sem = [df.loc[df[xfac] == levels[0], res].sem(), df.loc[df[xfac] == levels[1], res].sem()]
sd = [df.loc[df[xfac] == levels[0], res].std(), df.loc[df[xfac] == levels[1], res].std()]
ci = (1-alpha)*100
# degree of freedom
# a_count, b_count = np.split(count, 2)
dfa = a_count - 1
dfb = b_count - 1
# sample variance
with np.errstate(invalid='ignore'):
var_a = np.nan_to_num(np.var(a_val, ddof=1))
var_b = np.nan_to_num(np.var(b_val, ddof=1))
mean_diff = mean[0] - mean[1]
# variable 95% CI
varci_low = []
varci_up = []
tcritvar = [(stats.t.ppf((1 + (1-alpha)) / 2, dfa)), (stats.t.ppf((1 + (1-alpha)) / 2, dfb))]
for i in range(len(levels)):
varci_low.append(mean[i] - (tcritvar[i] * sem[i]))
varci_up.append(mean[i] + (tcritvar[i] * sem[i]))
var_test = 'equal'
# perform levene to check for equal variances
w, pvalue = stats.levene(a_val, b_val)
if pvalue < alpha:
print("\nWarning: the two group variance are not equal. Rerun the test with evar=False\n")
if evar is True:
# pooled variance
message = 'Two sample t-test with equal variance'
p_var = (dfa * var_a + dfb * var_b) / (dfa + dfb)
# std error
se = np.sqrt(p_var * (1.0 / a_count + 1.0 / b_count))
dfr = dfa + dfb
else:
# Welch's t-test for unequal variance
# calculate se
message = 'Two sample t-test with unequal variance (Welch\'s t-test)'
if a_count == 1 or b_count == 1:
raise Exception('Not enough observation for either levels. The observations should be > 1 for both levels')
a_temp = var_a / a_count
b_temp = var_b / b_count
dfr = ((a_temp + b_temp) ** 2) / ((a_temp ** 2) / (a_count - 1) + (b_temp ** 2) / (b_count - 1))
se = np.sqrt(a_temp + b_temp)
var_test = 'unequal'
tval = np.divide(mean_diff, se)
oneside_pval = stats.t.sf(np.abs(tval), dfr)
twoside_pval = oneside_pval * 2
# 95% CI for diff
# 2.306 t critical at 0.05
tcritdiff = stats.t.ppf((1 + (1-alpha)) / 2, dfr)
diffci_low = mean_diff - (tcritdiff * se)
diffci_up = mean_diff + (tcritdiff * se)
# print results
self.summary = '\n' + message + '\n\n' + tabulate([["Mean diff", mean_diff], ["t", tval], ["Std Error", se], ["df", dfr],
["P-value (one-tail)", oneside_pval], ["P-value (two-tail)", twoside_pval],
["Lower "+str(ci)+"%", diffci_low], ["Upper "+str(ci)+"%", diffci_up]]) + '\n\n' + \
'Parameter estimates\n\n' + tabulate([[levels[0], count[0], mean[0], sd[0], sem[0], varci_low[0],
varci_up[0]], [levels[1], count[1], mean[1], sd[1], sem[1],
varci_low[1], varci_up[1]]],
headers=["Level", "Number", "Mean", "Std Dev", "Std Error",
"Lower "+str(ci)+"%", "Upper "+str(ci)+"%"]) + '\n'
elif test_type == 3:
            if not isinstance(res, (tuple, list)) or len(res) != 2:
                raise Exception("res should be either a list or tuple of length 2")
if sorted(res) != sorted(df.columns):
raise ValueError("one or all of res columns are not in dataframe")
df = df.drop(['diff_betw_res'], axis=1, errors='ignore')
df['diff_betw_res'] = df[res[0]]-df[res[1]]
a_val = df['diff_betw_res'].to_numpy()
res_out = stats.ttest_1samp(a=a_val, popmean=0, nan_policy='omit')
sem = df['diff_betw_res'].sem()
ci = (1 - alpha) * 100
tcritvar = stats.t.ppf((1 + (1 - alpha)) / 2, len(a_val)-1)
# print results
self.summary = "\nPaired t-test \n" + "\n" + \
tabulate([["Sample size", len(a_val)], ["Difference Mean", df['diff_betw_res'].mean()], ["t", res_out[0]],
["Df", len(a_val)-1], ["P-value (one-tail)", res_out[1]/2],
["P-value (two-tail)", res_out[1]],
["Lower " + str(ci) + "%", df['diff_betw_res'].mean() - (tcritvar * sem)],
["Upper " + str(ci) + "%", df['diff_betw_res'].mean() + (tcritvar * sem)]])
else:
raise ValueError("Provide a value to test_type parameter for appropriate t-test")
def chisq(self, df='dataframe', p=None):
# d = pd.read_csv(table, index_col=0)
tabulate_list = []
        if any(i < 0 for i in df.values.flatten()):
raise ValueError("The observation counts for each group must be non-negative number")
if p is None:
# assert df.shape[1] == 2, 'dataframe must 2-dimensional contingency table of observed counts'
chi_ps, p_ps, dof_ps, expctd_ps = stats.chi2_contingency(df.to_dict('split')['data'])
tabulate_list.append(["Pearson", dof_ps, chi_ps, p_ps])
chi_ll, p_ll, dof_ll, expctd_ll = stats.chi2_contingency(df.to_dict('split')['data'], lambda_="log-likelihood")
tabulate_list.append(["Log-likelihood", dof_ll, chi_ll, p_ll])
mosaic_dict = dict()
m = df.to_dict('split')
for i in range(df.shape[0]):
for j in range(df.shape[1]):
mosaic_dict[(m['index'][i], m['columns'][j])] = m['data'][i][j]
# print("\nChi-squared test\n")
# print(tabulate(tabulate_list, headers=["Test", "Df", "Chi-square", "P-value"]))
self.summary = '\nChi-squared test for independence\n' + '\n' + \
tabulate(tabulate_list, headers=["Test", "Df", "Chi-square", "P-value"]) + '\n'
# print("\nExpected frequency counts\n")
# print(tabulate(expctd_ps, headers=df.to_dict('split')['columns'], showindex="always"))
self.expected_df = '\nExpected frequency counts\n' + '\n' + \
tabulate(expctd_ps, headers=df.to_dict('split')['columns'], showindex="always") + '\n'
# labels = lambda k: "" if mosaic_dict[k] != 0 else ""
# mosaic(mosaic_dict, labelizer=labels)
# plt.savefig('mosaic.png', format='png', bbox_inches='tight', dpi=300)
# goodness of fit test
if p:
df = df.drop(['expected_counts'], axis=1, errors='ignore')
            assert df.shape[1] == 1, 'dataframe must be a one-dimensional contingency table of observed counts'
assert len(p) == df.shape[0], 'probability values should be equal to observations'
assert isinstance(p, (tuple, list)) and round(sum(p), 10) == 1, 'probabilities must be list or tuple and ' \
'sum to 1'
df['expected_counts'] = [df.sum()[0] * i for i in p]
if (df['expected_counts'] < 5).any():
print('Warning: Chi-squared may not be valid as some of expected counts are < 5')
            if any(i < 0 for i in p):
raise ValueError("The probabilities for each group must be non-negative number")
dof = df.shape[0] - 1
chi_gf, p_gf = stats.chisquare(f_obs=df[df.columns[0]].to_numpy(), f_exp=df[df.columns[1]].to_numpy())
tabulate_list.append([chi_gf, dof, p_gf, df[df.columns[0]].sum()])
# print('\nChi-squared goodness of fit test\n')
# print(tabulate(tabulate_list, headers=["Chi-Square", "Df", "P-value", "Sample size"]), '\n')
self.summary = '\nChi-squared goodness of fit test\n' + '\n' + \
tabulate(tabulate_list, headers=["Chi-Square", "Df", "P-value", "Sample size"]) + '\n'
self.expected_df = df
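    # Illustrative usage sketch: chisq expects a contingency table of observed counts
    # with group labels as the index; passing p switches to the goodness-of-fit form.
    # The tables and probabilities below are assumptions.
    # res = stat()
    # res.chisq(df=count_table)                           # test of independence
    # res.chisq(df=single_column_counts, p=[0.25, 0.75])  # goodness of fit
    # print(res.summary); print(res.expected_df)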
def unstack_single_factor(self, df='dataframe', xfac=None, res=None):
sample_dict = dict((k, 1) for k in df[xfac].unique())
df = df.set_index(xfac)
unstack_dict = dict()
df_dict = {k: v.to_dict(orient='records') for k, v in df.groupby(level=0)}
for k, v in df_dict.items():
if k in sample_dict:
unstack_dict[k] = []
for ele in v:
unstack_dict[k].append(ele[res])
self.unstack_df = pd.DataFrame(unstack_dict)
def unstack_two_factor(self, df='dataframe', row_fac=None, col_fac=None, res=None):
sample_row_dict = dict((k, 1) for k in df[row_fac].unique())
sample_col_dict = dict((k, 1) for k in df[col_fac].unique())
df = df.set_index(row_fac)
unstack_dict = dict()
for k in sample_row_dict:
for k1 in sample_col_dict:
unstack_dict[(k, k1)] = []
df_dict = {k: v.to_dict(orient='records') for k, v in df.groupby(level=0)}
max_rep = 1
for k, v in df_dict.items():
if k in sample_row_dict:
for ele in v:
if ele[col_fac] in sample_col_dict:
unstack_dict[(k, ele[col_fac])].append(ele[res])
for k, v in unstack_dict.items():
if len(v) > max_rep:
max_rep = len(v)
process_unstack_dict = dict()
for k in sample_col_dict:
process_unstack_dict[k] = []
sample_row_list = []
for k in sample_row_dict:
sample_row_list.extend(max_rep * [k])
# sample_row_list.sort()
process_unstack_dict['sample'] = sample_row_list
sample_col_list = df[col_fac].unique()
sample_row_list_uniq = []
for ele in sample_row_list:
if ele not in sample_row_list_uniq:
sample_row_list_uniq.append(ele)
unstack_dict_sorted = collections.OrderedDict(sorted(unstack_dict.items()))
for ele1 in sample_col_list:
# for ele in list(set(sample_row_list)):
for ele in sample_row_list_uniq:
for k, v in unstack_dict_sorted.items():
# print(k, v, ele1, ele, 'jjjjjjj')
if k[0] == ele and k[1] == ele1:
# print(k, v, ele1, ele)
process_unstack_dict[k[1]].extend(v)
self.unstack_df = pd.DataFrame(process_unstack_dict)
def reg_metric(self, y=None, yhat=None, resid=None):
if not all(isinstance(i, np.ndarray) for i in [y, yhat, resid]):
raise ValueError('y, residuals, and yhat must be numpy array')
if y.shape[0] != yhat.shape[0] != resid.shape[0]:
raise ValueError('y, residuals, and yhat must have same shape')
rmse = np.sqrt((np.sum((yhat-y)**2) / y.shape[0]))
# Mean squared error (MSE)
mse = np.mean(resid**2)
# Mean absolute error (MAE)
mae = np.mean(np.abs(y - yhat))
# Mean absolute percentage error (MAPE)
mape = np.mean(np.abs((y - yhat) / y))
self.reg_metric_df = pd.DataFrame({'Metrics':
['Root Mean Square Error (RMSE)', 'Mean Squared Error (MSE)', 'Mean Absolute Error (MAE)',
'Mean Absolute Percentage Error (MAPE)'], 'Value': [rmse, mse, mae, mape]}).round(4)
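    # Illustrative usage sketch: the arrays stored by lin_reg can be passed directly to
    # reg_metric (attribute names as set in lin_reg above).
    # res.reg_metric(y=res.Y, yhat=res.y_hat, resid=res.residuals)
    # print(res.reg_metric_df)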
def bin_grouping(self, bin=None, bin_size=None):
if not isinstance(bin, (list, np.ndarray)):
raise ValueError('bin array must be list or numpy array')
bin_sorted = np.sort(bin)
incr_bin_size = bin_size
self.bin_dict = dict()
        for i in bin_sorted:
            # advance the bin upper bound until the value fits so that values spanning
            # more than one empty bin are not silently dropped
            while i > bin_size:
                bin_size += incr_bin_size
            if bin_size not in self.bin_dict:
                self.bin_dict[bin_size] = 1
            else:
                self.bin_dict[bin_size] += 1
class gff:
def __init__(self):
pass
def gff_to_gtf(file='gff_file', trn_feature_name=None, parent_attr='Parent'):
read_gff_file_cds = open(file, 'r')
cds_dict_st, cds_dict_st_phase = dict(), dict()
cds_dict_end, cds_dict_end_phase = dict(), dict()
cds_ct = 0
for line in read_gff_file_cds:
if not line.startswith('#'):
line = re.split('\s+', line.strip())
if line[2] == 'mRNA' or line[2] == 'transcript' or line[2] == trn_feature_name:
# attr = re.split(';', line[8])
# transcript_id = attr[0].split('=')[1]
if 'ID=' in line[8]:
transcript_id = re.search('ID=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"ID required in GFF3 file in attribute field for mRNA/transcript"
" feature type")
cds_dict_st[transcript_id] = []
cds_dict_end[transcript_id] = []
elif line[2] == 'CDS':
if parent_attr+'=' in line[8]:
transcript_id_cds = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for CDS"
" feature type")
cds_ct += 1
cds_dict_st[transcript_id_cds].append(line[3])
cds_dict_end[transcript_id_cds].append(line[4])
# if CDS phase contains dot values
if line[7] == '.':
                        print('Warning: No valid phase values for CDS feature; setting phase value to zero')
cds_dict_st_phase[(transcript_id_cds, line[3])] = 0
cds_dict_end_phase[(transcript_id_cds, line[4])] = 0
else:
cds_dict_st_phase[(transcript_id_cds, line[3])] = line[7]
cds_dict_end_phase[(transcript_id_cds, line[4])] = line[7]
read_gff_file_cds.close()
# check if CDS feature present in GFF3 file
if cds_ct == 0:
print("Warning: No CDS feature type found in given GFF3 file. GTF file requires CDS feature type\n")
read_gff_file = open(file, 'r')
out_gtf_file = open(Path(file).stem+'.gtf', 'w')
gene_id = ''
transcript_id = ''
gene_name = ''
mirna_id = ''
first_cds_present, last_cds_present, start_codon_present, end_codon_present = 0, 0, 0, 0
gene_trn = dict()
ttr_i, cds_i, exon_i, ftr_i = dict(), dict(), dict(), dict()
for line in read_gff_file:
if not line.startswith('#'):
line = re.split('\s+', line.strip())
if line[2] == 'gene':
# attr = re.split(';', line[8])
if 'ID=' in line[8]:
gene_id = re.search('ID=(.+?)(;|$)', line[8]).group(1)
if 'Name=' in line[8]:
gene_name = re.search('Name=(.+?)(;|$)', line[8]).group(1)
elif 'gene_name=' in line[8]:
gene_name = re.search('gene_name=(.+?)(;|$)', line[8]).group(1)
elif 'gene_id=' in line[8]:
gene_name = re.search('gene_id=(.+?)(;|$)', line[8]).group(1)
if 'ID=' not in line[8]:
raise Exception("ID field required in GFF3 file in attribute field for gene feature type")
gene_trn[gene_id] = []
gene_attr_gtf = 'gene_id "' + gene_id + '"; gene_name "' + gene_name + '"; gene_source "' + line[1]+'";'
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'mRNA' or line[2] == 'transcript' or line[2] == trn_feature_name:
# cds_i, exon_i, ftr_i, ttr_i = 1, 1, 1, 1
exon_i_int, cds_i_int, ftr_i_int, ttr_i_int = 0, 0, 0, 0
if 'ID=' in line[8]:
transcript_id = re.search('ID=(.+?)(;|$)', line[8]).group(1)
if 'ID=' not in line[8]:
raise Exception("ID field required in GFF3 file in attribute field for mRNA/transcript"
" feature type")
if line[2] == 'mRNA' or line[2] == trn_feature_name:
line[2] = 'transcript'
if gene_id != '':
gene_trn[gene_id].append(transcript_id)
ttr_i[transcript_id] = 0
cds_i[transcript_id] = 0
exon_i[transcript_id] = 0
ftr_i[transcript_id] = 0
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id + \
'"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'CDS':
# attr = re.split(';', line[8])
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if parent_attr+'=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for CDS"
" feature type")
if line[3] == min(cds_dict_st[transcript_id_temp], key=int):
first_cds_present = 1
if line[4] == max(cds_dict_end[transcript_id_temp], key=int):
last_cds_present = 1
if transcript_id_temp in gene_trn[gene_id]:
cds_i[transcript_id_temp] += 1
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; cds_number "' + str(cds_i[transcript_id_temp]) + '"; gene_name "' + \
gene_name + '"; gene_source "' + line[1] + '";'
# for transcripts with shared CDS
elif ',' in transcript_id_temp:
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; cds_number "' + '""' + '"; gene_name "' + \
gene_name + '"; gene_source "' + line[1] + '";'
# cds_i += 1
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'exon':
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if parent_attr+'=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for exon"
" feature type")
if transcript_id_temp in gene_trn[gene_id]:
exon_i[transcript_id_temp] += 1
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; exon_number "' + str(exon_i[transcript_id_temp]) + '"; gene_name "' + \
gene_name + '"; gene_source "' + line[1] + '";'
# for transcripts with shared exons
elif ',' in transcript_id_temp:
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; exon_number "' + '""' + '"; gene_name "' + gene_name + \
'"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'five_prime_UTR':
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if 'Parent=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for five_prime_UTR"
" feature type")
if transcript_id_temp in gene_trn[gene_id]:
ftr_i[transcript_id_temp] += 1
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; five_prime_UTR_number "' + str(ftr_i[transcript_id_temp]) + \
'"; gene_name "'+ gene_name + '"; gene_source "' + line[1] + '";'
# for transcripts with shared five_prime_UTR
elif ',' in transcript_id_temp:
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; three_prime_UTR_number "' + '""' + \
'"; gene_name "' + gene_name + '"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'three_prime_UTR':
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if 'Parent=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for three_prime_UTR"
" feature type")
if transcript_id_temp in gene_trn[gene_id]:
ttr_i[transcript_id_temp] += 1
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; three_prime_UTR_number "' + str(ttr_i[transcript_id_temp]) + \
'"; gene_name "' + gene_name + '"; gene_source "' + line[1] + '";'
# for transcripts with shared three_prime_UTR
elif ',' in transcript_id_temp:
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; three_prime_UTR_number "' + "" + '"; gene_name "' + \
gene_name + '"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'start_codon':
start_codon_present = 1
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if parent_attr+'=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for three_prime_UTR"
" feature type")
if transcript_id_temp == transcript_id:
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id + '"; gene_name "' + \
gene_name+ '"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'stop_codon':
end_codon_present = 1
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if parent_attr+'=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for three_prime_UTR"
" feature type")
if transcript_id_temp == transcript_id:
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id + '"; gene_name "' + \
gene_name+ '"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
elif line[2] == 'miRNA':
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search(parent_attr+'=(.+?)(;|$)', line[8]).group(1)
# this is tailored based gff3 files from mirbase
elif 'Derives_from=' in line[8]:
transcript_id_temp = re.search('Derives_from=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for CDS"
" feature type")
if 'ID=' in line[8]:
mirna_id = re.search('ID=(.+?)(;|$)', line[8]).group(1)
# for transcripts with shared CDS
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id + \
'"; miRNA_id "' + mirna_id + '"; gene_source "' + line[1] + '";'
# cds_i += 1
out_gtf_file.write('\t'.join(line[0:8])+'\t'+gene_attr_gtf+'\n')
if first_cds_present == 1 and start_codon_present == 0:
first_cds_present = 0
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if parent_attr+'=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for CDS"
" feature type")
for k in gene_trn[gene_id]:
if k == transcript_id_temp:
if line[6] == '+':
codon_min_cord = int(min(cds_dict_st[transcript_id_temp], key=int))
cds_phase = int(
cds_dict_st_phase[
(transcript_id_temp, min(cds_dict_st[transcript_id_temp], key=int))])
line[2], line[3], line[4] = 'start_codon', codon_min_cord + cds_phase, \
codon_min_cord + cds_phase + 2
elif line[6] == '-':
codon_max_cord = int(max(cds_dict_end[transcript_id_temp], key=int))
cds_phase = int(
cds_dict_end_phase[
(transcript_id_temp, max(cds_dict_end[transcript_id_temp], key=int))])
line[2], line[3], line[4] = 'start_codon', codon_max_cord - 2 - cds_phase, \
codon_max_cord - cds_phase
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + '"; gene_name "' + \
gene_name + '"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(str(x) for x in line[0:8]) + '\t' + gene_attr_gtf + '\n')
if last_cds_present == 1 and end_codon_present == 0:
last_cds_present = 0
if parent_attr+'=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
if parent_attr+'=' not in line[8]:
raise Exception(
"Parent field required in GFF3 file in attribute field for CDS"
" feature type")
for k in gene_trn[gene_id]:
if k == transcript_id_temp:
if line[6] == '+':
codon_max_cord = int(max(cds_dict_end[transcript_id_temp], key=int))
line[2], line[3], line[4] = 'stop_codon', codon_max_cord - 2, codon_max_cord
elif line[6] == '-':
codon_min_cord = int(min(cds_dict_st[transcript_id_temp], key=int))
line[2], line[3], line[4] = 'stop_codon', codon_min_cord, codon_min_cord + 2
gene_attr_gtf = 'gene_id "' + gene_id + '"; transcript_id "' + transcript_id_temp + \
'"; gene_name "' + gene_name + '"; gene_source "' + line[1] + '";'
out_gtf_file.write('\t'.join(str(x) for x in line[0:8]) + '\t' + gene_attr_gtf + '\n')
read_gff_file.close()
out_gtf_file.close()
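    # Illustrative usage sketch: the call below converts a GFF3 file to GTF, inferring
    # start/stop codons from CDS coordinates when they are absent; the file name is an
    # assumption and the output is written as <input stem>.gtf.
    # gff.gff_to_gtf(file='annotation.gff3')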
def gffreader(file='gff_file'):
read_gff_file = open(file, 'r')
transcript_id = ''
for line in read_gff_file:
if not line.startswith('#'):
line = re.split('\t', line.strip())
if line[2]=='gene':
if 'ID=' in line[8]:
gene_id = re.search('ID=(.+?)(;|$)', line[8]).group(1)
if 'Name=' in line[8]:
gene_name = re.search('Name=(.+?)(;|$)', line[8]).group(1)
elif 'gene_name=' in line[8]:
gene_name = re.search('gene_name=(.+?)(;|$)', line[8]).group(1)
elif 'gene_id=' in line[8]:
gene_name = re.search('gene_id=(.+?)(;|$)', line[8]).group(1)
if 'ID=' not in line[8]:
raise Exception("ID field required in GFF3 file in attribute field for gene feature type")
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
elif line[2] == 'mRNA' or line[2] == 'transcript':
if 'ID=' in line[8]:
transcript_id = re.search('ID=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception("ID field required in GFF3 file in attribute field for mRNA/transcript"
" feature type")
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
elif line[2]=='CDS':
if 'Parent=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for CDS"
" feature type")
if transcript_id_temp == transcript_id:
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
elif line[2]=='exon':
if 'Parent=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for exon"
" feature type")
if transcript_id_temp == transcript_id:
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
elif line[2]=='five_prime_UTR':
if 'Parent=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for five_prime_UTR"
" feature type")
if transcript_id_temp == transcript_id:
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
elif line[2]=='three_prime_UTR':
if 'Parent=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for three_prime_UTR"
" feature type")
if transcript_id_temp == transcript_id:
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
elif line[2]=='start_codon':
if 'Parent=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for three_prime_UTR"
" feature type")
if transcript_id_temp == transcript_id:
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
elif line[2]=='stop_codon':
if 'Parent=' in line[8]:
transcript_id_temp = re.search('Parent=(.+?)(;|$)', line[8]).group(1)
else:
raise Exception(
"Parent field required in GFF3 file in attribute field for three_prime_UTR"
" feature type")
if transcript_id_temp == transcript_id:
yield (line[0], gene_id, gene_name, transcript_id, line[1], line[2], line[3], line[4], line[6], line[8])
read_gff_file.close()
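# Illustrative usage sketch: gffreader is a generator yielding, per feature, the tuple
# (seqid, gene_id, gene_name, transcript_id, source, feature, start, end, strand,
# attributes); the file name is an assumption.
# for seqid, gene_id, gene_name, trn_id, source, feature, start, end, strand, attr in \
#         gff.gffreader(file='annotation.gff3'):
#     print(feature, gene_id, trn_id, start, end, strand)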
class norm:
def __init__(self):
pass
def cpm(self, df='dataframe'):
df = df.dropna()
# check for non-numeric values
for i in df.columns:
assert general.check_for_nonnumeric(df[i]) == 0, \
'dataframe contains non-numeric values in {} column'.format(i)
self.lib_size = df.sum()
self.cpm_norm = (df * 1e6) / df.sum()
def rpkm(self, df='dataframe', gl=None):
df = df.dropna()
assert gl is not None, "Provide column name for gene length in bp"
# check for non-numeric values
for i in df.columns:
assert general.check_for_nonnumeric(df[i]) == 0, \
'dataframe contains non-numeric values in {} column'.format(i)
self.rpkm_norm = (df.div(df[gl], axis=0) * 1e9) / df.sum()
self.rpkm_norm = self.rpkm_norm.drop([gl], axis=1)
def tpm(self, df='dataframe', gl=None):
df = df.dropna()
assert gl is not None, "Provide column name for gene length in bp"
# check for non-numeric values
for i in df.columns:
assert general.check_for_nonnumeric(df[i]) == 0, \
'dataframe contains non-numeric values in {} column'.format(i)
# gene length must be in bp
self.a = df.div(df[gl], axis=0) * 1e3
self.tpm_norm = (self.a * 1e6) / self.a.sum()
self.tpm_norm = self.tpm_norm.drop([gl], axis=1)
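# Illustrative usage sketch for the normalization helpers above; count_df is an assumed
# dataframe of raw counts that, for rpkm/tpm, also carries a gene-length column in bp
# named by gl.
# nm = norm()
# nm.cpm(df=count_df[count_columns]); print(nm.cpm_norm)
# nm.rpkm(df=count_df, gl='length'); print(nm.rpkm_norm)
# nm.tpm(df=count_df, gl='length'); print(nm.tpm_norm)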
class assembly:
    def sizdist(self, file='fasta', n=50):
        fasta_iter = Fasta.fasta_reader(file)
        seq_len = []
        total_len_sum = 0
        for record in fasta_iter:
            header, sequence = record
            seq_len.append(len(sequence))
            total_len_sum += len(sequence)
        seq_len.sort(reverse=True)
        # the original body stopped after the sort; the n parameter suggests an N-statistic
        # (N50 by default), so the cumulative scan below is an assumed completion and the
        # attribute name nx_stat is hypothetical
        cum_len = 0
        for length in seq_len:
            cum_len += length
            if cum_len >= total_len_sum * (n / 100):
                self.nx_stat = length
                break
class lncrna:
@staticmethod
def lincrna_types(gff_file='gff_file_with_lincrna', map_factor=200):
read_gff_file = open(gff_file, 'r')
out_file = open('same_conv_out.txt', 'w')
out_file_2 = open('dive_out.txt', 'w')
out_file_3 = open('lincrna_types.txt', 'w')
transcript_id = ''
lincrna_dict = dict()
mrna_dict = dict()
lincrna_dict_1 = dict()
mrna_dict_1 = dict()
line_num = 0
for line in read_gff_file:
if not line.startswith('#'):
line = re.split('\t', line.strip())
line_num += 1
line.extend([line_num])
if line[1] == 'Evolinc':
lincrna_trn_id = re.search('transcript_id (.+?)(;|$)', line[8]).group(1)
lincrna_dict[(line[0], int(line[3]), int(line[4]), line[6], line[9])] = lincrna_trn_id.strip('"')
lincrna_dict_1[line_num] = [line[0], int(line[3]), int(line[4]), line[6], line[9], lincrna_trn_id.strip('"')]
if line[2] == 'mRNA':
mrna_id = re.search('gene_id (.+?)(;|$)', line[8]).group(1)
mrna_dict[(line[0], int(line[3]), int(line[4]), line[6], line[9])] = mrna_id.strip('"')
mrna_dict_1[line_num] = [line[0], int(line[3]), int(line[4]), line[6], line[9],
mrna_id.strip('"')]
read_gff_file.close()
checked = dict()
checked_2 = dict()
# for same and convergent
for k in lincrna_dict_1:
if lincrna_dict_1[k][3] == '+':
k1 = k
for i in range(map_factor):
if k1 in mrna_dict_1:
linc_st = lincrna_dict_1[k][1]
mrna_st = mrna_dict_1[k1][1]
diff = mrna_st - linc_st + 1
mrna_id = mrna_dict_1[k1][5]
if 'PGSC' in mrna_dict_1[k1][5] and mrna_dict_1[k1][3] == '+' and lincrna_dict_1[k][5] not in \
checked and mrna_dict_1[k1][0] == lincrna_dict_1[k][0]:
checked[lincrna_dict_1[k][5]] = [diff, mrna_id, 'same']
out_file.write(lincrna_dict_1[k][5]+'\t'+mrna_dict_1[k1][5]+'\t'+str(diff)+'\t'+'same'+'\n')
elif 'PGSC' in mrna_dict_1[k1][5] and mrna_dict_1[k1][3] == '-' and lincrna_dict_1[k][5] not \
in checked and mrna_dict_1[k1][0] == lincrna_dict_1[k][0]:
checked[lincrna_dict_1[k][5]] = [diff, mrna_id, 'convergent']
out_file.write(lincrna_dict_1[k][5]+'\t'+mrna_dict_1[k1][5]+'\t'+str(diff)+'\t'+'convergent'+'\n')
else:
k1 += 1
elif lincrna_dict_1[k][3] == '-':
k1 = k
for i in range(map_factor):
if k1 in mrna_dict_1:
linc_st = lincrna_dict_1[k][1]
mrna_st = mrna_dict_1[k1][1]
diff = linc_st - mrna_st + 1
mrna_id = mrna_dict_1[k1][5]
if 'PGSC' in mrna_dict_1[k1][5] and mrna_dict_1[k1][3] == '-' and lincrna_dict_1[k][
5] not in checked and mrna_dict_1[k1][0] == lincrna_dict_1[k][0]:
checked[lincrna_dict_1[k][5]] = [diff, mrna_id, 'same']
out_file.write(lincrna_dict_1[k][5]+'\t'+mrna_dict_1[k1][5]+'\t'+str(diff)+'\t'+'same'+'\n')
elif 'PGSC' in mrna_dict_1[k1][5] and mrna_dict_1[k1][3] == '+' and lincrna_dict_1[k][
5] not in checked and mrna_dict_1[k1][0] == lincrna_dict_1[k][0]:
checked[lincrna_dict_1[k][5]] = [diff, mrna_id, 'convergent']
out_file.write(lincrna_dict_1[k][5]+'\t'+mrna_dict_1[k1][5]+'\t'+str(diff)+'\t'+'convergent'+'\n')
else:
k1 -= 1
# for divergent only
for k in lincrna_dict_1:
if lincrna_dict_1[k][3] == '+':
k1 = k
for i in range(map_factor):
if k1 in mrna_dict_1:
linc_st = lincrna_dict_1[k][1]
mrna_st = mrna_dict_1[k1][1]
diff = linc_st - mrna_st + 1
mrna_id = mrna_dict_1[k1][5]
if 'PGSC' in mrna_dict_1[k1][5] and mrna_dict_1[k1][3] == '-' and lincrna_dict_1[k][5] not in \
checked_2 and mrna_dict_1[k1][0] == lincrna_dict_1[k][0]:
checked_2[lincrna_dict_1[k][5]] = [diff, mrna_id, 'divergent']
out_file_2.write(lincrna_dict_1[k][5]+'\t'+mrna_dict_1[k1][5]+'\t'+str(diff)+'\t'+'divergent'+'\n')
else:
k1 -= 1
elif lincrna_dict_1[k][3] == '-':
k1 = k
for i in range(map_factor):
if k1 in mrna_dict_1:
linc_st = lincrna_dict_1[k][1]
mrna_st = mrna_dict_1[k1][1]
diff = mrna_st - linc_st + 1
mrna_id = mrna_dict_1[k1][5]
if 'PGSC' in mrna_dict_1[k1][5] and mrna_dict_1[k1][3] == '+' and lincrna_dict_1[k][
5] not in checked_2 and mrna_dict_1[k1][0] == lincrna_dict_1[k][0]:
checked_2[lincrna_dict_1[k][5]] = [diff, mrna_id, 'divergent']
out_file_2.write(lincrna_dict_1[k][5]+'\t'+mrna_dict_1[k1][5]+'\t'+str(diff)+'\t'+'divergent'+'\n')
else:
k1 += 1
for k in lincrna_dict_1:
if lincrna_dict_1[k][5] not in checked:
print(lincrna_dict_1[k][5])
for k in checked:
x = 0
for k1 in checked_2:
if k == k1 and checked[k][2] == 'same':
x = 1
out_file_3.write(k + '\t' + checked[k][1] + '\t' + checked[k][2] + '\t' + str(checked[k][0]) + '\n')
elif k == k1 and checked[k][2] == 'convergent' and checked_2[k][2] == 'divergent':
if checked[k][0] <= checked_2[k][0]:
x = 1
out_file_3.write(k + '\t' + checked[k][1] + '\t' + checked[k][2] + '\t' + str(checked[k][0]) + '\n')
else:
x = 1
out_file_3.write(
k + '\t' + checked_2[k1][1] + '\t' + checked_2[k1][2] + '\t' + str(checked_2[k1][0]) + '\n')
if x == 0:
out_file_3.write(k + '\t' + checked[k][1] + '\t' + checked[k][2] + '\t' + str(checked[k][0]) + '\n')
class genfam:
def __init__(self):
self.species_df = None
self.genfam_info= None
self.df_enrich = None
@staticmethod
def enrichment_analysis(user_provided_uniq_ids_len=None, get_user_id_count_for_gene_fam=None, gene_fam_count=None,
bg_gene_count=None, df_dict_glist=None, stat_sign_test=None, multi_test_corr=None,
uniq_p=None, uniq_c=None, uniq_f=None, get_gene_ids_from_user=None, short_fam_name=None,
min_map_ids=None):
pvalues = []
enrichment_result = []
# get total mapped genes from user list
mapped_query_ids = sum(get_user_id_count_for_gene_fam.values())
for k in get_user_id_count_for_gene_fam:
# first row and first column (k)
# from user list
# number of genes mapped to family in subset
gene_in_category = get_user_id_count_for_gene_fam[k]
short_fam = short_fam_name[k]
# first row and second column (m-k) _uniq_id_count_dict (m)
# n-k (n total mappable deg)
# from user list
# total subset - gene mapped to family
# gene_not_in_category_but_in_sample = _uniq_id_count_dict-gene_in_category
# calculate stat sign based on only ids mapped and not all query ids as in agrigo
gene_not_in_category_but_in_sample = mapped_query_ids - gene_in_category
# second row and first column (n-k)
# m-k
# genes that mapped to family are in genome - genes in category from user list
gene_not_in_catgory_but_in_genome = gene_fam_count[k] - gene_in_category
bg_gene_fam_ids = gene_fam_count[k]
# second row and second column gene_in_category+gene_not_in_catgory_and_in_genome (n)
# N-m-n+k
bg_in_genome = bg_gene_count - mapped_query_ids - (gene_in_category + gene_not_in_catgory_but_in_genome) \
+ gene_in_category
# for go terms
process = uniq_p[k]
function = uniq_f[k]
comp = uniq_c[k]
# gene ids from user list mapped to particular gene family
gene_ids = get_gene_ids_from_user[k]
# fisher exact test
if stat_sign_test == 1:
stat_test_name = 'Fisher exact test'
# run analysis if only mappable ids are >= 5
# if mapped_query_ids >= 5:
oddsratio, pvalue = stats.fisher_exact([[gene_in_category, gene_not_in_category_but_in_sample],
[gene_not_in_catgory_but_in_genome, bg_in_genome]], 'greater')
if int(gene_in_category) > 0:
enrichment_result.append([k, short_fam, user_provided_uniq_ids_len, gene_in_category, bg_gene_count,
bg_gene_fam_ids, oddsratio, pvalue, process, function, comp, gene_ids])
pvalues.append(pvalue)
# Hypergeometric
elif stat_sign_test == 2:
stat_test_name = 'Hypergeometric distribution'
oddsratio = 'NA'
pvalue = stats.hypergeom.sf(gene_in_category - 1, bg_gene_count, gene_fam_count[k], mapped_query_ids)
if int(gene_in_category) > 0:
enrichment_result.append([k, short_fam, user_provided_uniq_ids_len, gene_in_category, bg_gene_count,
bg_gene_fam_ids, oddsratio, pvalue, process, function, comp, gene_ids])
pvalues.append(pvalue)
# Binomial
elif stat_sign_test == 3:
stat_test_name = 'Binomial distribution'
oddsratio = 'NA'
# probability from the reference set for particular category
exp_pvalue = (gene_fam_count[k] / bg_gene_count)
pvalue = stats.binom_test(gene_in_category, mapped_query_ids, exp_pvalue, alternative='greater')
if int(gene_in_category) > 0:
enrichment_result.append([k, short_fam, user_provided_uniq_ids_len, gene_in_category, bg_gene_count,
bg_gene_fam_ids, oddsratio, pvalue, process, function, comp, gene_ids])
pvalues.append(pvalue)
# Chi-Square
elif stat_sign_test == 4:
stat_test_name = 'Chi-squared distribution'
oddsratio = 'NA'
chi2, pvalue, dof, exp = stats.chi2_contingency([[gene_in_category, gene_not_in_category_but_in_sample],
[gene_not_in_catgory_but_in_genome, bg_in_genome]],
correction=False)
# count cells where expected frequency < 5
cell_ct = sum(sum(i < 5 for i in exp))
cell_ct_per = 100 * float(cell_ct) / float(4)
# only report the family where observed count is more than expected gene in category count
# this is for getting highly enriched genes other under-represented genes will also be found
if int(gene_in_category) > 0 and gene_in_category >= exp[0][0]:
enrichment_result.append([k, short_fam, user_provided_uniq_ids_len, gene_in_category, bg_gene_count,
bg_gene_fam_ids, oddsratio, pvalue, process, function, comp, gene_ids,
cell_ct_per])
# print key, chi2, pvalue
pvalues.append(pvalue)
# FDR Bonferroni
if multi_test_corr == 1:
mult_test_name = 'Bonferroni'
fdr = multipletests(pvals=pvalues, method='bonferroni')[1]
# FDR Bonferroni-Holm
elif multi_test_corr == 2:
mult_test_name = 'Bonferroni-Holm'
fdr = multipletests(pvals=pvalues, method='holm')[1]
# FDR Benjamini-Hochberg
elif multi_test_corr == 3:
mult_test_name = 'Benjamini-Hochberg'
fdr = multipletests(pvals=pvalues, method='fdr_bh')[1]
return enrichment_result, fdr, mapped_query_ids, stat_test_name, mult_test_name
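    # Worked illustration (hypothetical counts) of the 2x2 table assembled above and
    # passed to stats.fisher_exact / stats.chi2_contingency:
    #
    #                      in gene family                           not in gene family
    #   query genes        gene_in_category (12)                    gene_not_in_category_but_in_sample (188)
    #   background rest    gene_not_in_catgory_but_in_genome (340)  bg_in_genome (24460)
    #
    # with mapped_query_ids = 200, gene_fam_count[k] = 352 and bg_gene_count = 25000,
    # so the four cells sum to the background gene count.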
@staticmethod
def get_file_from_gd(url=None):
get_path = 'https://drive.google.com/uc?export=download&id=' + url.split('/')[-2]
return pd.read_csv(get_path)
@staticmethod
def get_bg_counts(species=None, check_ids=False):
if species == 'ahyp':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1Y5WHC-G7idvaMa_sX5xyy99MLXENaSfS/view?usp=sharing')
plant_name = 'Amaranthus hypochondriacus v2.1 (Amaranth)'
elif species == 'atri':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1PeuE4_r4o3YdczEwuJhiJgpS0SAD6kWi/view?usp=sharing')
plant_name = 'Amborella trichopoda v1.0 (Amborella)'
elif species == 'acom':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/198lQk2_kvI78qqE15QmEiuKNmR7IjIy0/view?usp=sharing')
plant_name = 'Ananas comosus v3 (Pineapple)'
elif species == 'alyr':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1xa8V_g0fJLJRuZ9lbJooZy_QC1xNypnC/view?usp=sharing')
plant_name = 'Arabidopsis lyrata v2.1 (Lyre-leaved rock cress)'
elif species == 'atha':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1akf_2LJU0ULLB31o7smQI6NwUC0_Cg1T/view?usp=sharing')
plant_name = 'Arabidopsis thaliana TAIR10 (Thale cress)'
elif species == 'aoff':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1D5dkCEwEf7uwPsb1bJySKQcH3HukDLUD/view?usp=sharing')
plant_name = 'Asparagus officinalis V1.1 (Asparagus)'
elif species == 'bstr':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1uJtA-WpEfgYdjDsiCAZbPdRX9OyMAmb1/view?usp=sharing')
plant_name = 'Boechera stricta v1.2 (Drummond rock cress)'
elif species == 'bdis':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1ju40AJAw9vYcm3CdKEYOnL8LonXF9t5H/view?usp=sharing')
plant_name = 'Brachypodium distachyon v3.1 (Purple false brome)'
elif species == 'bole':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1NOAEvLJBKPFKogwyUkQ_8bXCO4Wz8ZqF/view?usp=sharing')
plant_name = 'Brassica oleracea capitata v1.0 (Savoy cabbage)'
elif species == 'cgra':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1SI7PLZOuc5GTnuFxYA-zdCHZShLvleSk/view?usp=sharing')
plant_name = 'Capsella grandiflora v1.1'
elif species == 'crub':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1Sz9yPe67BzLujVj_N4Hpgvu8rG7rFSLl/view?usp=sharing')
plant_name = 'Capsella rubella v1.0 (Pink shepherds purse)'
elif species == 'cpap':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1pFLQJt2m-TDA2zGBDBCFQSiiiA5SEfQO/view?usp=sharing')
plant_name = 'Carica papaya ASGPBv0.4 (Papaya)'
elif species == 'cqui':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/10KLHnBgB-OC7aTEEhxYqp3BYbLqchfcj/view?usp=sharing')
plant_name = 'Chenopodium quinoa v1.0 (Quinoa)'
elif species == 'crei':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1YbQ457hRuKZ2KsuVhfSShgCeso9QTPZP/view?usp=sharing')
plant_name = 'Chlamydomonas reinhardtii v5.5 (Green algae)'
elif species == 'czof':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1WftatxF-m11Q7WWxvbSV_gEIJlLUp4X5/view?usp=sharing')
plant_name = 'Chromochloris zofingiensis v5.2.3.2'
elif species == 'cari':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/14EldoR_JBgvFAgyOa_MW4IqaE-_IVEA3/view?usp=sharing')
plant_name = 'Cicer arietinum v1.0 (Chickpea)'
elif species == 'ccle':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1xTrCDPgnWwfIObZFxeMdp0v-iNL2aO69/view?usp=sharing')
plant_name = 'Citrus clementina v1.0 (Clementine)'
elif species == 'csin':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1Zaif12UzenScHAuNP7NY2bqSHplBpx9a/view?usp=sharing')
plant_name = 'Citrus sinensis v1.1 (Sweet orange)'
elif species == 'csub':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1UXEF9n1kKNXtEDv-e9pGYJkKcMsJ203S/view?usp=sharing')
plant_name = 'Coccomyxa subellipsoidea C-169 v2.0'
elif species == 'csat':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1DeuURHM1yFtHk7TUjh341Au43BdXQqQK/view?usp=sharing')
plant_name = 'Cucumis sativus v1.0'
elif species == 'dcar':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1dUO1umk6RvzXdT7jilx7NYmXz6xWXWBj/view?usp=sharing')
plant_name = 'Daucus carota v2.0'
elif species == 'dsal':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1EibVmaLKl9jqIECQ5eI35dd1ePpycKw_/view?usp=sharing')
plant_name = 'Dunaliella salina v1.0'
elif species == 'egra':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1s6-HTCXKtd8TCvEzYSb2t2T_sCnJF3GT/view?usp=sharing')
plant_name = 'Eucalyptus grandis v2.0'
elif species == 'esal':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/18sMv6sK4DzWqEhpU7JuMQzwoI-DdPAgZ/view?usp=sharing')
plant_name = 'Eutrema salsugineum v1.0'
elif species == 'fves':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1GhNX0dkRZHOZiQIxuPTrPuOX6Yf7HZ4z/view?usp=sharing')
plant_name = 'Fragaria vesca v1.1'
elif species == 'gmax':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1j9nENpp5_bWnuuiM0ojoB3-shltl7QMn/view?usp=sharing')
plant_name = 'Glycine max Wm82.a2.v1'
elif species == 'grai':
df = genfam.get_file_from_gd('https://drive.google.com/file/d/11_Atm8NQpt87KzBS7hEVwnadq19x7SPk/view?usp=sharing')
plant_name = 'Gossypium raimondii v2.1'
elif species == 'hvul':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1gQmt5j8o4TaRrl-RE4GVUV5_a19haR5N/view?usp=sharing')
plant_name = 'Hordeum vulgare r1 (Barley)'
elif species == 'kfed':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1te-KkIUkKhwyaJeyMzxruji20VzV2Jp-/view?usp=sharing')
plant_name = 'Kalanchoe fedtschenkoi v1.1 (diploid Kalanchoe)'
elif species == 'lsat':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1V-fXPzpR2hV_FKGed1bcnVV0LCnA36vd/view?usp=sharing')
plant_name = 'Lactuca sativa V5 (Lettuce)'
elif species == 'lusi':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1RY97um9edfEyC_HoPLmi5FZuGvBvYK6M/view?usp=sharing')
plant_name = 'Linum usitatissimum v1.0 (Flax)'
elif species == 'mdom':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1d05-57Uxc-Zpg1P42oCmZWQscBETzwru/view?usp=sharing')
plant_name = 'Malus domestica v1.0 (Apple)'
elif species == 'mesc':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/131d1eiwgeRt691GASO9hEi5j2AVlgA5f/view?usp=sharing')
plant_name = 'Manihot esculenta v6.1 (Cassava)'
elif species == 'mpol':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1S4eJBldzkvov2HRigG68JDJ4kPz5o2i9/view?usp=sharing')
plant_name = 'Marchantia polymorpha v3.1 (Common liverwort)'
elif species == 'mtru':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/111BzdpRL6Vmrq32ErujKmRJGi8EZmQWG/view?usp=sharing')
plant_name = 'Medicago truncatula Mt4.0v1 (Barrel medic)'
elif species == 'mpus':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/109eZLEkzQFSyeq1DYHiznDPUuZ1X9D1y/view?usp=sharing')
plant_name = 'Micromonas pusilla CCMP1545 v3.0'
elif species == 'mgut':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1Ewqr7_xeSSMzADQgzEBJseu9YVyfsHC7/view?usp=sharing')
plant_name = 'Mimulus guttatus v2.0 (Monkey flower)'
elif species == 'macu':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1v9q_voRcqaMDXc7CpfYdkAt0uUwbHRbb/view?usp=sharing')
plant_name = 'Musa acuminata v1 (Banana)'
elif species == 'oeur':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1dJAipHwx4mPZmMAs53gJG0EiCzgizGtx/view?usp=sharing')
plant_name = 'Olea europaea var. sylvestris v1.0 (Wild olive)'
elif species == 'otho':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1eUnTqx4WMRG0HHZS2HBAhi0vil1a9_xj/view?usp=sharing')
plant_name = 'Oropetium thomaeum v1.0'
elif species == 'osat':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1GsQgmFrwxtlSMCgDAyamAvMXQvEaYjcE/view?usp=sharing')
plant_name = 'Oryza sativa v7_JGI (Rice)'
elif species == 'oluc':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1Ld57aW3j7GT7J6YSlfDou32pxsIlnB-S/view?usp=sharing')
plant_name = 'Ostreococcus lucimarinus v2.0'
elif species == 'ppat':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1uUlBWGJYiyScBLjZg67txZohtueOzo-L/view?usp=sharing')
plant_name = 'Physcomitrella patens v3.3'
elif species == 'ptri':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1fGsrLBwTuP72nwNTOIPR52nVu0-wzt5Z/view?usp=sharing')
plant_name = 'Populus trichocarpa v3.1 (Poplar)'
elif species == 'pumb':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1TdZ_bm78xP2kca6M_NY7l6lAunpkM4CU/view?usp=sharing')
plant_name = 'Porphyra umbilicalis v1.5 (Laver)'
elif species == 'pper':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1nfcMPj-Xmg8U7-Pfs6cJe74kQ31pZ0ac/view?usp=sharing')
plant_name = 'Prunus persica v2.1 (Peach)'
elif species == 'rcom':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1UYXzGirLozWX-N_uuOBirsJ0pKSJoarv/view?usp=sharing')
plant_name = 'Ricinus communis v0.1 (Castor bean)'
elif species == 'smoe':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1VPQfN5-_SX91JgmcIFf2ZdvsO7QWEOEr/view?usp=sharing')
plant_name = 'Selaginella moellendorffii v1.0 (Spikemoss)'
elif species == 'sita':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1N97CkOLPE7Atgjs9Alst3VrvsRj-igtJ/view?usp=sharing')
plant_name = 'Setaria italica v2.2 (Foxtail millet)'
elif species == 'slyc':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1_MxCNxyPFXk4V7-6vR9NTFhdgH2Yg04W/view?usp=sharing')
plant_name = 'Solanum lycopersicum iTAG2.4 (Tomato)'
elif species == 'stub':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1XttvKbhHr4oEKiHSWKFYIN0gWAjO6oTC/view?usp=sharing')
plant_name = 'Solanum tuberosum'
elif species == 'sbio':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1KXNKCiWynvev2OePQPtxIbBtjffaf3UK/view?usp=sharing')
plant_name = 'Sorghum bicolor v3.1.1 (Cereal grass)'
elif species == 'spol':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1i6ux7UVrEF3XNRuf3uF9ZsRThrM7jO11/view?usp=sharing')
plant_name = 'Spirodela polyrhiza v2 (Greater duckweed)'
elif species == 'tcac':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/10wDbFyC9uo_lmnWbdLNxecuV7ZdsuwC4/view?usp=sharing')
plant_name = 'Theobroma cacao v1.1 (Cacao)'
elif species == 'tpra':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1Vt2lyBmOqTk1bHY6LG7Va6zMGEDZs0oq/view?usp=sharing')
plant_name = 'Trifolium pratense v2 (Red clover)'
elif species == 'taes':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1qOzqfXfVRstYfY7FF4EgMEjsaUiZ9au-/view?usp=sharing')
plant_name = 'Triticum aestivum v2.2 (Common wheat)'
elif species == 'vvin':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1-B0Rut8GZ_buDCsDM723kevbNeBNGnXg/view?usp=sharing')
plant_name = 'Vitis vinifera Genoscope.12X (Common grape vine)'
elif species == 'vcar':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1QFDNUoPXhxkSfnf8qC5UQxxn1KqKWVdb/view?usp=sharing')
plant_name = 'Volvox carteri v2.1 (Volvox)'
elif species == 'zmay':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1ufO2Ueml0i0bqw2aWAa6rF7zI52JKC3Z/view?usp=sharing')
plant_name = 'Zea mays Ensembl-18 (Maize)'
elif species == 'zmar':
df = genfam.get_file_from_gd(
'https://drive.google.com/file/d/1ufO2Ueml0i0bqw2aWAa6rF7zI52JKC3Z/view?usp=sharing')
plant_name = 'Zostera marina v2.2 (Common eelgrass)'
else:
raise ValueError('Invalid value for species \n')
bg_gene_count, bg_trn_count, bg_phytid_count = df['loc_len'].sum(), df['trn_len'].sum(), df['phyt_id_len'].sum()
if check_ids:
return df, plant_name
else:
return df, bg_gene_count, bg_trn_count, bg_phytid_count, plant_name
@staticmethod
def get_rec_dicts(df=None, glist=None, sname=None, loclen=None, gop=None, gof=None, goc=None):
df1_glist = df[glist]
df1_sname = df[sname]
df1_loclen = df[loclen]
df1_gop = df[gop]
df1_gof = df[gof]
df1_goc = df[goc]
df2_glist = df1_glist.replace(r'(^\{|\}$)', '', regex=True)
df_dict_glist = df2_glist.set_index('gene_fam').T.to_dict('list')
df_dict_sname = df1_sname.set_index('gene_fam').T.to_dict('list')
df_dict_loclen = df1_loclen.set_index('gene_fam').T.to_dict('list')
df_dict_gop = df1_gop.set_index('gene_fam').T.to_dict('list')
df_dict_gof = df1_gof.set_index('gene_fam').T.to_dict('list')
df_dict_goc = df1_goc.set_index('gene_fam').T.to_dict('list')
df_dict_glist = {key: value[0].split(',') for key, value in df_dict_glist.items()}
return df_dict_glist, df_dict_sname, df_dict_loclen, df_dict_gop, df_dict_gof, df_dict_goc
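# Illustrative note (added for clarity, not part of the original workflow): for a toy
# two-column frame such as
#     pd.DataFrame({'gene_fam': ['MYB'], 'array_agg': ['{AT1G01010,AT1G01020}']})
# the dictionaries built above end up as
#     df_dict_glist == {'MYB': ['AT1G01010', 'AT1G01020']}
# i.e. each gene family maps to the comma-split list of its member IDs once the
# surrounding braces are stripped; the other dictionaries map the family name to a
# one-element list holding its short name, background count, or GO terms.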
def fam_enrich(self, id_file='text_file_with_gene_ids', species=None, id_type=None, stat_sign_test=1,
multi_test_corr=3, min_map_ids=5, alpha=0.05):
if id_type not in [1, 2, 3]:
raise ValueError('Invalid value for id_type')
if stat_sign_test not in [1, 2, 3, 4]:
raise ValueError('Invalid value for stat_sign_test')
if multi_test_corr not in [1, 2, 3]:
raise ValueError('Invalid value for multi_test_corr')
df, bg_gene_count, bg_trn_count, bg_phytid_count, plant_name = genfam.get_bg_counts(species)
# phytozome locus == 1
get_gene_ids_from_user = dict()
gene_fam_count = dict()
short_fam_name = dict()
get_user_id_count_for_gene_fam = dict()
uniq_p = dict()
uniq_f = dict()
uniq_c = dict()
user_provided_uniq_ids = dict()
# phytozome locus
if id_type == 1:
df_dict_glist, df_dict_sname, df_dict_loclen, df_dict_gop, df_dict_gof, df_dict_goc = \
genfam.get_rec_dicts(df, ['gene_fam', 'array_agg'], ['gene_fam', 'fam_short'], ['gene_fam', 'loc_len'],
['gene_fam', 'uniq_p'], ['gene_fam', 'uniq_f'], ['gene_fam', 'uniq_c'])
for item in df_dict_glist:
df_dict_glist[item] = [x.upper() for x in df_dict_glist[item]]
get_gene_ids_from_user[item] = []
# gene family short name
short_fam_name[item] = df_dict_sname[item][0]
# count for each gene family for background genome
gene_fam_count[item] = df_dict_loclen[item][0]
# user gene id counts for each gene family
get_user_id_count_for_gene_fam[item] = 0
# GO terms
uniq_p[item] = df_dict_gop[item]
uniq_f[item] = df_dict_gof[item]
uniq_c[item] = df_dict_goc[item]
# bg_gene_count already holds the locus-level background total for this id_type
# phytozome transcript
elif id_type == 2:
df_dict_glist, df_dict_sname, df_dict_loclen, df_dict_gop, df_dict_gof, df_dict_goc = \
genfam.get_rec_dicts(df, ['gene_fam', 'trn_array'], ['gene_fam', 'fam_short'], ['gene_fam', 'trn_len'],
['gene_fam', 'uniq_p'], ['gene_fam', 'uniq_f'], ['gene_fam', 'uniq_c'])
for item in df_dict_glist:
df_dict_glist[item] = [x.upper() for x in df_dict_glist[item]]
get_gene_ids_from_user[item] = []
# gene family short name
short_fam_name[item] = df_dict_sname[item][0]
# count for each gene family for background genome
gene_fam_count[item] = df_dict_loclen[item][0]
# user gene id counts for each gene family
get_user_id_count_for_gene_fam[item] = 0
# GO terms
uniq_p[item] = df_dict_gop[item]
uniq_f[item] = df_dict_gof[item]
uniq_c[item] = df_dict_goc[item]
bg_gene_count = bg_trn_count
# phytozome pacId
elif id_type == 3:
df_dict_glist, df_dict_sname, df_dict_loclen, df_dict_gop, df_dict_gof, df_dict_goc = \
genfam.get_rec_dicts(df, ['gene_fam', 'phyt_id_array'], ['gene_fam', 'fam_short'], ['gene_fam', 'phyt_id_len'],
['gene_fam', 'uniq_p'], ['gene_fam', 'uniq_f'], ['gene_fam', 'uniq_c'])
for item in df_dict_glist:
df_dict_glist[item] = [x.upper() for x in df_dict_glist[item]]
get_gene_ids_from_user[item] = []
# gene family short name
short_fam_name[item] = df_dict_sname[item][0]
# count for each gene family for background genome
gene_fam_count[item] = df_dict_loclen[item][0]
# user gene id counts for each gene family
get_user_id_count_for_gene_fam[item] = 0
# GO terms
uniq_p[item] = df_dict_gop[item]
uniq_f[item] = df_dict_gof[item]
uniq_c[item] = df_dict_goc[item]
bg_gene_count = bg_phytid_count
if isinstance(id_file, pd.DataFrame):
for ix, rows in id_file.iterrows():
gene_id = rows[0].strip().upper()
user_provided_uniq_ids[gene_id] = 0
else:
read_id_file = open(id_file, 'r')
for gene_id in read_id_file:
gene_id = gene_id.strip().upper()
# remove the duplicate ids and keep unique
user_provided_uniq_ids[gene_id] = 0
read_id_file.close()
# get the annotation count. number of genes from user input present in genfam database
anot_count = 0
for k1 in df_dict_glist:
for k2 in user_provided_uniq_ids:
if k2 in df_dict_glist[k1]:
# if the user input id present in df_dict_glist increment count
get_gene_ids_from_user[k1].append(k2)
get_user_id_count_for_gene_fam[k1] += 1
anot_count += 1
if anot_count == 0:
raise Exception('None of the provided gene IDs match the selected ID type')
enrichment_result, fdr, mapped_query_ids, stat_test_name, mult_test_name = \
genfam.enrichment_analysis(len(user_provided_uniq_ids), get_user_id_count_for_gene_fam, gene_fam_count,
bg_gene_count, df_dict_glist, stat_sign_test, multi_test_corr, uniq_p, uniq_c,
uniq_f, get_gene_ids_from_user, short_fam_name, min_map_ids)
# if the number of mapped input IDs is below the minimum, the process stops
# an empty result usually means the gene IDs were malformed, not in Phytozome format,
# or the wrong species was selected
# if len(uniq_id_count_dict) >= 5 and enrichment_result and _plant_select!="z":
if mapped_query_ids < min_map_ids:
raise Exception('The number of mapped gene IDs must be at least ' + str(min_map_ids))
# replace all fdr values which are greater than 1 to 1
fdr[fdr > 1] = 1
fam_out_enrich_file = open('fam_enrich_out.txt', 'w')
fam_out_all_file = open('fam_all_out.txt', 'w')
cell_ct_chi = 0
# Query IDs = total query ids from user (k)
# Annotated query IDs = query ids annotated to particular gene family
# background ids = particular gene family ids present in whole genome backround
# background total = total genome ids
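# Worked illustration (added for clarity; the exact table built inside
# genfam.enrichment_analysis is not shown here and may differ): a typical 2x2 layout
# for testing one family F with 200 query IDs (8 annotated to F) against a background
# of 30,000 genes (300 annotated to F) is
#              in F     not in F
#     query       8          192
#     genome    300       29,700
# and the Fisher exact / chi-square statistic is computed on such a table.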
for x in range(0, len(fdr)):
enrichment_result[x].insert(3, anot_count)
enrichment_result[x].insert(9, fdr[x])
if stat_sign_test == 4:
# chi-s test only
fam_out_enrich_file.write(
"Gene Family" + "\t" + "Short Name" + "\t" + "Query total" + "\t" + "Annotated query total" + "\t" +
"Annotated query per family" + "\t" + "Background annotated total" + "\t" +
"Background annotated per family" + "\t" + "Odds ratio" + "\t" + "P-value" + "\t" + "FDR" + "\t" +
"GO biological process" + "\t" + "GO molecular function" + "\t" + "GO cellular component" + "\t" +
"Gene IDs" + "\t" + "Cells with expected frequency <5 (%)" + "\n")
fam_out_all_file.write(
"Gene Family" + "\t" + "Short Name" + "\t" + "Query total" + "\t" + "Annotated query total" + "\t" + "Annotated query per family"
+ "\t" + "Background annotated total" + "\t" + "Background annotated per family" + "\t" + "Odds ratio" +
"\t" + "P-value" + "\t" + "FDR" + "\t" + "GO biological process" + "\t" + "GO molecular function" + "\t" +
"GO cellular component" + "\t" + "Gene IDs" + "\t" + "Cells with expected frequency <5 (%)" + "\n")
else:
fam_out_enrich_file.write(
"Gene Family" + "\t" + "Short Name" + "\t" + "Query total" + "\t" + "Annotated query total" + "\t" +
"Annotated query per family" + "\t" + "Background annotated total" + "\t" +
"Background annotated per family" + "\t" + "Odds ratio" + "\t" + "P-value" + "\t" + "FDR" + "\t" +
"GO biological process" + "\t" + "GO molecular function" + "\t" + "GO cellular component" + "\t" +
"Gene IDs" + "\n")
fam_out_all_file.write(
"Gene Family" + "\t" + "Short Name" + "\t" + "Query total" + "\t" + "Annotated query total" + "\t" + "Annotated query per family"
+ "\t" + "Background annotated total" + "\t" + "Background annotated per family" + "\t" + "Odds ratio" +
"\t" + "P-value" + "\t" + "FDR" + "\t" + "GO biological process" + "\t" + "GO molecular function" + "\t" +
"GO cellular component" + "\t" + "Gene IDs" + "\n")
genfam_for_df, sname_for_df, pv_for_df, fdr_for_df = [], [], [], []
for x in range(0, len(enrichment_result)):
fam_out_all_file.write('\t'.join(str(v) for v in enrichment_result[x]) + "\n")
# keep gene families with p value <= alpha (default 0.05)
if float(enrichment_result[x][8]) <= alpha:
fam_out_enrich_file.write('\t'.join(str(v) for v in enrichment_result[x]) + "\n")
genfam_for_df.append(enrichment_result[x][0])
sname_for_df.append(enrichment_result[x][1])
pv_for_df.append(enrichment_result[x][8])
fdr_for_df.append(enrichment_result[x][9])
if stat_sign_test == 4:
if float(enrichment_result[x][14]) > 20.0:
cell_ct_chi += 1
if cell_ct_chi > 0:
print('\n WARNING: Some gene families have more than 20% of cells with expected frequency count < 5. \n '
'The chi-square test may not be valid for them. You can validate those gene families \n for enrichment using '
'the Fisher exact test. \n Please check the output file and see the last column for the expected frequency count. \n')
# console result
self.df_enrich = pd.DataFrame({'Gene Family': genfam_for_df, 'Short name': sname_for_df, 'p value': pv_for_df,
'FDR': fdr_for_df})
self.df_enrich = self.df_enrich.sort_values(by=['p value'])
# console info
self.genfam_info = pd.DataFrame({'Parameter': ['Total query gene IDs', 'Number of genes annotated',
'Number of enriched gene families (p < 0.05)', 'Plant species',
'Statistical test for enrichment', 'Multiple testing correction method',
'Significance level'],
'Value':[len(user_provided_uniq_ids), mapped_query_ids, self.df_enrich.shape[0],
plant_name, stat_test_name, mult_test_name, alpha]})
fam_out_all_file.close()
fam_out_enrich_file.close()
# get figure for enrichment results
df_enrich_fig = self.df_enrich.copy()
df_enrich_fig['log10p'] = -(np.log10(df_enrich_fig['p value']))
visuz.stat.normal_bar(df=df_enrich_fig, x_col_name='Short name', y_col_name='log10p', axxlabel='Gene Family',
axylabel='-log10(p value)', ar=(90, 0), figname='genfam_enrich', r=1000)
@staticmethod
def allowed_ids(locus=None, trn=None, pacid=None, species=None):
print('\nAllowed ID types for ', species, '\n\n', 'Phytozome locus: ', locus, '\n', 'Phytozome transcript: ', trn,
'\n', 'Phytozome pacID: ', pacid, '\n')
@staticmethod
def check_allowed_ids(species=None):
df, plant_name = genfam.get_bg_counts(species, True)
genfam.allowed_ids(df['array_agg'].iloc[0].strip('{}').split(',')[0],
df['trn_array'].iloc[0].strip('{}').split(',')[0],
df['phyt_id_array'].iloc[0].strip('{}').split(',')[0],
plant_name)
class anot:
def __init__(self):
self.mapped_ids = None
def id_map(self, species=None, input_id=None, look_id=None, fetch_id=None):
if input_id is None or look_id is None or fetch_id is None:
raise ValueError('Invalid value for input_id or look_id or fetch_id options')
if species == 'stub':
df = general.get_file_from_gd(
'https://drive.google.com/file/d/1jrw95f3PX1qpFK_XYLDCwZ8hlIVUr4FM/view?usp=sharing')
else:
raise ValueError('Invalid value for species')
df = df[[look_id, fetch_id]]
input_id_df = pd.read_csv(input_id, header=None)
input_id_df.rename(columns={input_id_df.columns[0]: look_id}, inplace=True)
self.mapped_ids = pd.merge(input_id_df, df, how='left', on=look_id)
class get_data:
def __init__(self, data=None):
if data=='mlr':
self.data = pd.read_csv("https://reneshbedre.github.io/assets/posts/reg/test_reg.csv")
elif data=='boston':
self.data = pd.read_csv("https://reneshbedre.github.io/assets/posts/reg/boston.csv")
elif data=='volcano':
self.data =
|
pd.read_csv("https://reneshbedre.github.io/assets/posts/volcano/testvolcano.csv")
|
pandas.read_csv
|
# Generated by nuclio.export.NuclioExporter
import os
import pandas as pd
import numpy as np
import scipy as sp
import pickle
import datetime
import v3io_frames as v3f
import matplotlib.pyplot as plt
from sklearn.preprocessing import KBinsDiscretizer
def to_observations(context, t, u, key):
t = (
t.apply(lambda row: f"{'_'.join([str(row[val]) for val in t.columns])}", axis=1)
.value_counts()
.sort_index()
)
u = (
u.apply(lambda row: f"{'_'.join([str(row[val]) for val in u.columns])}", axis=1)
.value_counts()
.sort_index()
)
joined_uniques = pd.DataFrame([t, u]).T.fillna(0).sort_index()
joined_uniques.columns = ["t", "u"]
t_obs = joined_uniques.loc[:, "t"]
u_obs = joined_uniques.loc[:, "u"]
t_pdf = t_obs / t_obs.sum()
u_pdf = u_obs / u_obs.sum()
context.log_dataset(f"{key}_t_pdf", pd.DataFrame(t_pdf), format="parquet")
context.log_dataset(f"{key}_u_pdf", pd.DataFrame(u_pdf), format="parquet")
return t_pdf, u_pdf
def tvd(t, u):
return sum(abs(t - u)) / 2
def helinger(t, u):
return (np.sqrt(np.sum(np.power(np.sqrt(t) - np.sqrt(u), 2)))) / np.sqrt(2)
def kl_divergence(t, u):
t_u = np.sum(np.where(t != 0, t * np.log(t / u), 0))
u_t = np.sum(np.where(u != 0, u * np.log(u / t), 0))
return t_u + u_t
def all_metrics(t, u):
return tvd(t, u), helinger(t, u), kl_divergence(t, u)
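# Minimal sketch (added for illustration; not used by the pipeline below): the three
# drift metrics applied to two toy, already-aligned discrete distributions.
def _demo_drift_metrics():
    t_pdf = pd.Series([0.5, 0.3, 0.2], index=["a", "b", "c"])
    u_pdf = pd.Series([0.4, 0.4, 0.2], index=["a", "b", "c"])
    # tvd is 0 for identical distributions and 1 for disjoint ones; helinger is
    # bounded in [0, 1]; kl_divergence returns the symmetrised sum of both directions
    return all_metrics(t_pdf, u_pdf)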
def drift_magnitude(
context,
t: pd.DataFrame,
u: pd.DataFrame,
label_col=None,
prediction_col=None,
discretizers: dict = None,
n_bins=5,
stream_name: str = "some_stream",
results_tsdb_container: str = "bigdata",
results_tsdb_table: str = "concept_drift/drift_magnitude",
):
"""Drift magnitude metrics
Computes drift magnitude metrics between base dataset t and dataset u.
Metrics:
- TVD (Total Variation Distance)
- Helinger
- KL Divergence
:param context: MLRun context
:param t: Base dataset for the drift metrics
:param u: Test dataset for the drift metrics
:param label_col: Label colum in t and u
:param prediction_col: Predictions column in t and u
:param discritizers: Dictionary of dicsritizers for the features if available
(Created automatically if not provided)
:param n_bins: Number of bins to be used for histrogram creation from continuous variables
:param stream_name: Output stream to push metrics to
:param results_tsdb_container: TSDB table container to push metrics to
:param results_tsdb_table: TSDB table to push metrics to
"""
v3io_client = v3f.Client("framesd:8081", container=results_tsdb_container)
try:
v3io_client.create("tsdb", results_tsdb_table, if_exists=1, rate="1/s")
except:
v3io_client.create(
"tsdb", results_tsdb_table, if_exists=1, attrs={"rate": "1/s"}
)
df_t = t.as_df()
df_u = u.as_df()
drop_columns = []
if label_col is not None:
drop_columns.append(label_col)
if prediction_col is not None:
drop_columns.append(prediction_col)
continuous_features = df_t.select_dtypes(["float"])
if discretizers is None:
discretizers = {}
for feature in continuous_features.columns:
context.logger.info(f"Fitting discretizer for {feature}")
discretizer = KBinsDiscretizer(
n_bins=n_bins, encode="ordinal", strategy="uniform"
)
discretizer.fit(continuous_features.loc[:, feature].values.reshape(-1, 1))
discretizers[feature] = discretizer
os.makedirs(context.artifact_path, exist_ok=True)
discretizers_path = os.path.abspath(f"{context.artifact_path}/discretizers.pkl")
with open(discretizers_path, "wb") as f:
pickle.dump(discretizers, f)
context.log_artifact("discretizers", target_path=discretizers_path)
context.logger.info("Discretizing featuers")
for feature, discretizer in discretizers.items():
df_t[feature] = discretizer.transform(
df_t.loc[:, feature].values.reshape(-1, 1)
)
df_u[feature] = discretizer.transform(
df_u.loc[:, feature].values.reshape(-1, 1)
)
df_t[feature] = df_t[feature].astype("int")
df_u[feature] = df_u[feature].astype("int")
context.log_dataset("t_discrete", df_t, format="parquet")
context.log_dataset("u_discrete", df_u, format="parquet")
context.logger.info("Compute prior metrics")
results = {}
t_prior, u_prior = to_observations(
context,
df_t.drop(drop_columns, axis=1),
df_u.drop(drop_columns, axis=1),
"features",
)
results["prior_tvd"], results["prior_helinger"], results["prior_kld"] = all_metrics(
t_prior, u_prior
)
if prediction_col is not None:
context.logger.info("Compute prediction metrics")
t_predictions =
|
pd.DataFrame(df_t.loc[:, prediction_col])
|
pandas.DataFrame
|
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, timedelta
import warnings
import itertools
import numpy as np
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
#------------------------------------------------------------------------------
# Helper functions
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
args += list(params)
return args
def _safe_col_name(col_name):
#TODO: probably want to forbid database reserved names, such as "database"
return col_name.strip().replace(' ', '_')
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, coerce=True, unit=format)
elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, coerce=True, unit=format)
else:
return to_datetime(col, coerce=True, format=format)
def _parse_date_columns(data_frame, parse_dates):
""" Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
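# Examples of the parse_dates forms handled above (illustrative; column name made up):
#     parse_dates=['created_at']                   # default to_datetime parsing
#     parse_dates={'created_at': '%Y-%m-%d'}       # strftime format for string columns
#     parse_dates={'created_at': 's'}              # integer epoch seconds
#     parse_dates={'created_at': {'utc': True}}    # kwargs forwarded to to_datetime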
def execute(sql, con, cur=None, params=None, flavor='sqlite'):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, a supported SQL flavor must also be provided
cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2 connection.
Returns
-------
Results Iterable
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
def tquery(sql, con, cur=None, params=None, flavor='sqlite'):
"""
Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
Parameters
----------
sql: string
SQL query to be executed
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2
connection.
Returns
-------
Results Iterable
"""
warnings.warn(
"tquery is depreciated, and will be removed in future versions",
DeprecationWarning)
pandas_sql = pandasSQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return pandas_sql.tquery(*args)
def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'):
"""
Does the same thing as tquery, but instead of returning results, it
returns the number of rows affected. Good for update queries.
Parameters
----------
sql: string
SQL query to be executed
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2
connection.
Returns
-------
Number of affected rows
"""
warnings.warn(
"uquery is depreciated, and will be removed in future versions",
DeprecationWarning)
pandas_sql = pandasSQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return pandas_sql.uquery(*args)
#------------------------------------------------------------------------------
# Read and write to DataFrames
def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True,
params=None, parse_dates=None):
"""
Returns a DataFrame corresponding to the result set of the query
string.
Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string
SQL query to be executed
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
index_col : string, optional
column name to use for the returned DataFrame object.
flavor : string, {'sqlite', 'mysql'}
The flavor of SQL to use. Ignored when using
SQLAlchemy engine. Required when using DBAPI2 connection.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
Returns
-------
DataFrame
See also
--------
read_table
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
return pandas_sql.read_sql(sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates)
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
Required when using DBAPI2 connection.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index)
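# Minimal usage sketch (illustrative, not part of the module): write a frame to an
# in-memory SQLite database in legacy DBAPI2 mode and read it back. The table and
# column names are made up.
def _example_sqlite_roundtrip():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    frame = DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    to_sql(frame, 'demo_table', conn, flavor='sqlite', index=False)
    return read_sql('SELECT a, b FROM demo_table', conn, flavor='sqlite')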
def has_table(table_name, con, meta=None, flavor='sqlite'):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor name must also be provided
flavor: {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
Required when using DBAPI2 connection.
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
return pandas_sql.has_table(table_name)
def read_table(table_name, con, meta=None, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
"""Given a table name and SQLAlchemy engine, return a DataFrame.
Type conversions will be done automatically.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy engine
Legacy mode not supported
meta : SQLAlchemy meta, optional
If omitted MetaData is reflected from engine
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
Returns
-------
DataFrame
See also
--------
read_sql
"""
pandas_sql = PandasSQLAlchemy(con, meta=meta)
table = pandas_sql.read_table(table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def pandasSQL_builder(con, flavor=None, meta=None):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
try:
import sqlalchemy
if isinstance(con, sqlalchemy.engine.Engine):
return PandasSQLAlchemy(con, meta=meta)
else:
warnings.warn(
"""Not an SQLAlchemy engine,
attempting to use as legacy DBAPI connection""")
if flavor is None:
raise ValueError(
"""PandasSQL must be created with an SQLAlchemy engine
or a DBAPI2 connection and SQL flavour""")
else:
return PandasSQLLegacy(con, flavor)
except ImportError:
warnings.warn("SQLAlchemy not installed, using legacy mode")
if flavor is None:
raise SQLAlchemyRequired
else:
return PandasSQLLegacy(con, flavor)
class PandasSQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses the fact that the table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas'):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index)
if frame is not None:
# We want to write a frame
if self.pd_sql.has_table(self.name):
if if_exists == 'fail':
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replace':
self.pd_sql.drop_table(self.name)
self.table = self._create_table_statement()
self.create()
elif if_exists == 'append':
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
self.table = self._create_table_statement()
else:
self.table = self._create_table_statement()
self.create()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name)
def sql_schema(self):
return str(self.table.compile())
def create(self):
self.table.create()
def insert_statement(self):
return self.table.insert()
def maybe_asscalar(self, i):
try:
return np.asscalar(i)
except AttributeError:
return i
def insert(self):
ins = self.insert_statement()
data_list = []
# to avoid if check for every row
keys = self.frame.columns
if self.index is not None:
for t in self.frame.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data[self.index] = self.maybe_asscalar(t[0])
data_list.append(data)
else:
for t in self.frame.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data_list.append(data)
self.pd_sql.execute(ins, data_list)
def read(self, coerce_float=True, parse_dates=None, columns=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
cols.insert(0, self.table.c[self.index])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
data = result.fetchall()
column_names = result.keys()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
# Assume that if the index is in prefix_index format, we gave it a name
# and should return it nameless
if self.index == self.prefix + '_index':
self.frame.index.name = None
return self.frame
def _index_name(self, index):
if index is True:
if self.frame.index.name is not None:
return _safe_col_name(self.frame.index.name)
else:
return self.prefix + '_index'
elif isinstance(index, string_types):
return index
else:
return None
def _create_table_statement(self):
from sqlalchemy import Table, Column
safe_columns = map(_safe_col_name, self.frame.dtypes.index)
column_types = map(self._sqlalchemy_type, self.frame.dtypes)
columns = [Column(name, typ)
for name, typ in zip(safe_columns, column_types)]
if self.index is not None:
columns.insert(0, Column(self.index,
self._sqlalchemy_type(
self.frame.index),
index=True))
return Table(self.name, self.pd_sql.meta, *columns)
def _harmonize_columns(self, parse_dates=None):
""" Make a data_frame's column type align with an sql_table
column types
Need to work around limited NA value support.
Floats are always fine, ints must always
be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted
to np.datetime if supported, but here we also force conversion
if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._numpy_type(sql_col.type)
if col_type is datetime or col_type is date:
if not issubclass(df_col.dtype.type, np.datetime64):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name].astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is int or col_type is bool:
self.frame[col_name].astype(col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, arr_or_dtype):
from sqlalchemy.types import Integer, Float, Text, Boolean, DateTime, Date, Interval
if arr_or_dtype is date:
return Date
if com.is_datetime64_dtype(arr_or_dtype):
try:
tz = arr_or_dtype.tzinfo
return DateTime(timezone=True)
except:
return DateTime
if com.is_timedelta64_dtype(arr_or_dtype):
return Interval
elif com.is_float_dtype(arr_or_dtype):
return Float
elif
|
com.is_integer_dtype(arr_or_dtype)
|
pandas.core.common.is_integer_dtype
|
import pandas as pd
import sqlite3 as sql
class FunctionMgr:
def __init__(self, sql_con):
self.sql_con = sql_con
'''
Get the list of stocks whose turnover rate exceeds the given threshold
'''
def GetTurnoverRateList(self,rate,start_date,end_date):
data = pd.read_sql_query('select ts_code,trade_date,turnover_rate,turnover_rate_f from daily_basic where trade_date <='+str(end_date)+' and trade_date >='+str(start_date)+' and turnover_rate_f >='+str(rate),self.sql_con)
return data
'''
Get the ranked list of average turnover rates over the given period
'''
def GetTurnoverRateMeanSortList(self,start_date,end_date):
data = self.GetTurnoverRateList(1, start_date, end_date)
group = data.groupby(by = 'ts_code')
def func(item):
tmp = dict()
tmp['mean_rate_f'] = item.turnover_rate_f.mean()
tmp['mean_rate'] = item.turnover_rate.mean()
tmp['mean_count'] = len(item)
return pd.Series(tmp)
ans = group.apply(func)
return (ans)
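# Illustrative note (not part of the original class): the groupby/apply above collapses
# each stock's rows into one Series, so `ans` is a frame indexed by ts_code with the
# columns mean_rate_f, mean_rate and mean_count, e.g. (made-up numbers)
#                mean_rate_f  mean_rate  mean_count
#     ts_code
#     000001.SZ          3.2        1.1          58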
'''
Look up concept categories by stock code
'''
def GetConceptByCode(self,code):
name_data =
|
pd.read_sql_query('select * from concept_info',self.sql_con)
|
pandas.read_sql_query
|
#%% Script to correct the metmast signals for metmast shadow effects
# Steps to be followed in detail:
# Import Lidar data and metmast data for one height into Pandas DataFrame [#### 80%] -> 115m, 85m, 55m*, 25m*
# plot weibull and windrose
# plot mast effects at the height [#### 80%]
# combine and correct the mast effects [#### 80%]
# verify the correction with metmast [#### 70%]
# verify the correction with windcube [#### 70%]
# %% user modules
# %matplotlib qt
import pandas as pd
import sys, os, glob
sys.path.append(r"../../userModules")
sys.path.append(r"../fun")
import numpy as np
import runpy as rp
import matplotlib.pyplot as plt
import tikzplotlib as tz
import re
import seaborn as sns
import datetime as DT
from datetime import datetime
sys.path.append(r"../../OneDasExplorer/Python Connector")
sys.path.append(r"c:\Users\giyash\OneDrive - Fraunhofer\Python\Scripts\OneDasExplorer\Python Connector")
sys.path.append(r"c:\Users\giyash\OneDrive - Fraunhofer\Python\Scripts\userModules")
from odc_exportData import odc_exportData
from FnImportOneDas import FnImportOneDas
from FnImportFiles import FnImportFiles
import matlab2py as m2p
from pythonAssist import *
from FnWsRange import *
from FnTurbLengthScale import FnTurbLengthScale
#%% user definitions
inp=struct()
inp.h115m = struct()
out = struct()
out.h115m = struct()
inp.tiny = 12
inp.Small = 14
inp.Large = 18
inp.Huge = 22
plt.rc('font', size=inp.Small) # controls default text sizes
plt.rc('axes', titlesize=inp.Small) # fontsize of the axes title
plt.rc('axes', labelsize=inp.Large) # fontsize of the x and y labels
plt.rc('xtick', labelsize=inp.Small) # fontsize of the tick labels
plt.rc('ytick', labelsize=inp.Small) # fontsize of the tick labels
plt.rc('legend', fontsize=inp.Small) # legend fontsize
plt.rc('figure', titlesize=inp.Huge) # fontsize of the figure title
# %% import channels and projects from OneDAS
from FnGetChannelNames import FnGetChannelNames
inp.prj_url = f"https://onedas.iwes.fraunhofer.de/api/v1/projects/%2FAIRPORT%2FAD8_PROTOTYPE%2FMETMAST_EXTENSION/channels"
inp.channelNames, inp.prj_paths, inp.channel_paths, inp.units = FnGetChannelNames(inp.prj_url)
## Initializations
inp.saveFig=1
inp.h115m.tstart = DT.datetime.strptime('2020-01-01_00-00-00', '%Y-%m-%d_%H-%M-%S') # Select start date in the form yyyy-mm-dd_HH-MM-SS
inp.h115m.tend = DT.datetime.strptime('2020-12-31_00-00-00', '%Y-%m-%d_%H-%M-%S') # Select end date in the form yyyy-mm-dd_HH-MM-SS
inp.h115m.sampleRate = [1/600]
inp.device = ['Ammonit']
inp.target_folder = r"../data"
inp.target_folder = ""
# comment the following line to read the data VerificationData.dat from ../data/
# inp.searchStr = ".*M0.*(V|D)\d"
inp.statStr = 's_mean'
# %% import Lidar data
try:
inp.searchStr
import pickle
from more_itertools import chunked
from FnFilterChannels import FnFilterChannels
try:
with open(r'../data/runVerification_MM.pickle', 'rb') as f:
DF = pickle.load(f)
print('[{0}] File (*.pickle) loaded into DF'.format(now()))
except FileNotFoundError:
print('[{0}] File (*.pickle) not found'.format(now()))
DF = pd.DataFrame()
for i in range(len(inp.device)):
inp.h115m.ch_names, inp.h115m.paths = FnFilterChannels(inp.prj_url, inp.h115m.sampleRate[i], inp.device[i], inp.searchStr)
for chunks in list(chunked(inp.h115m.ch_names, len(inp.h115m.ch_names))):
_, df, t = FnImportOneDas(inp.h115m.tstart, inp.h115m.tend, inp.h115m.paths, chunks, inp.h115m.sampleRate[i], inp.target_folder)
DF=
|
pd.concat([DF,df],axis=1)
|
pandas.concat
|
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 =
|
Period('11/30/2005', freq='2Q')
|
pandas.tseries.period.Period
|
"""
Co-occurrence Matrix / Associations
===============================================================================
>>> from techminer2 import *
>>> directory = "data/"
>>> co_occurrence_matrix_associations(
... column='author_keywords',
... min_occ=3,
... directory=directory,
... ).head()
word_A word_B co_occ
0 financial inclusion fintech 15
1 financial technologies fintech 14
2 blockchain fintech 13
3 innovation fintech 10
4 regulation fintech 10
"""
import pandas as pd
from .co_occurrence_matrix import co_occurrence_matrix
def co_occurrence_matrix_associations(
column,
min_occ=1,
directory="./",
):
coc_matrix = co_occurrence_matrix(
column,
min_occ=min_occ,
directory=directory,
)
# -------------------------------------------------------------------------
names = coc_matrix.columns.get_level_values(0)
n_cols = coc_matrix.shape[1]
edges = []
for i_row in range(1, n_cols):
for i_col in range(0, i_row):
if coc_matrix.iloc[i_row, i_col] > 0:
edges.append(
{
"word_A": names[i_row],
"word_B": names[i_col],
"co_occ": coc_matrix.iloc[i_row, i_col],
}
)
# -------------------------------------------------------------------------
co_occ =
|
pd.DataFrame(edges)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import random
def weighted_avg_and_std(values, weights):
"""computes weighted averages and stdevs
Args:
values (np.array): variable of interest
weight (np.array): weight to assign each observation
Returns:
list of weighted mean and stdev
"""
average = np.average(values, weights=weights)
variance = np.average((values - average)**2, weights=weights)
ess = np.sum(weights)**2 / np.sum(weights**2)
return [average, np.sqrt(variance) / np.sqrt(ess)]
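# Minimal sketch (illustrative): with equal weights the effective sample size equals n,
# so the second element reduces to the ordinary standard error of the mean.
def _demo_weighted_avg_and_std():
    values = np.array([1.0, 2.0, 3.0, 4.0])
    weights = np.ones_like(values)
    mean, se = weighted_avg_and_std(values, weights)
    return mean, se  # mean == 2.5, se == 1.118... / sqrt(4)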
def erupt(y, tmt, optim_tmt, weights=None, names=None):
"""Calculates Expected Response Under Proposed Treatments
Args:
y (np.array): response variables
tmt (np.array): treatment randomly assigned to observation
optim_tmt (np.array): proposed treatment model assigns
weights (np.array): weights for each observation. useful if treatment
was not uniformly assigned
names (list of str): names of response variables
Returns:
Expected means and stdevs of proposed treatments
"""
if weights is None:
weights = np.ones(y.shape[0])
equal_locs = np.where(optim_tmt == tmt)[0]
erupts = [weighted_avg_and_std(y[equal_locs][:,
x].reshape(-1,
1),
weights[equal_locs].reshape(-1,
1)) for x in range(y.shape[1])]
erupts = pd.DataFrame(erupts)
erupts.columns = ['mean', 'std']
if names is not None:
erupts['response_var_names'] = names
else:
erupts['response_var_names'] = [
'var_' + str(x) for x in range(y.shape[1])]
return erupts
def get_best_tmts(objective_weights, ice, unique_tmts):
"""Calulcates Optimal Treatment for a set of counterfactuals and weights.
Args:
objective_weights (np.array): list of weights to maximize in objective function
ice (np.array): 3-d array of counterfactuals.
num_tmts x num_observations x num_responses
unique_tmts (np.array): list of treatments
Returns:
Optimal Treatment with specific objective function
"""
weighted_ice = [x * objective_weights for x in ice]
sum_weighted_ice = np.array(weighted_ice).sum(axis=2)
max_values = sum_weighted_ice.T.argmax(axis=1)
best_tmt = unique_tmts[np.array(max_values)]
return(best_tmt)
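# Minimal sketch (illustrative): `ice` is shaped (num_tmts, num_observations,
# num_responses). With objective weights favouring only the first response, each
# observation gets the treatment whose weighted predicted responses sum highest.
def _demo_get_best_tmts():
    ice = np.array([
        [[1.0, 0.0], [0.0, 1.0]],   # predicted responses under treatment 'control'
        [[2.0, 0.0], [0.0, 0.5]],   # predicted responses under treatment 'offer'
    ])
    unique_tmts = np.array(['control', 'offer'])
    best = get_best_tmts(np.array([1.0, 0.0]), ice, unique_tmts)
    return best  # array(['offer', 'control']): obs 0 prefers 'offer', obs 1 'control'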
def get_weights(tmts):
"""Calculates weights to apply to tmts. Weight is inversely proportional to
how common each weight is.
Args:
tmts (np.array): treatments randomly assigned to
Returns:
array of length tmts that is weight to assign in erupt calculation
"""
tmts_pd = pd.DataFrame(tmts)
tmts_pd.columns = ['tmt']
weight_mat = pd.DataFrame(pd.DataFrame(
tmts).iloc[:, 0].value_counts() / len(tmts))
weight_mat['tmt'] = weight_mat.index
weight_mat.columns = ['weight', 'tmt']
tmts_pd = tmts_pd.merge(weight_mat, on='tmt', how='left')
observation_weights = 1.0 / np.array(tmts_pd['weight']).reshape(-1, 1)
return observation_weights
def get_erupts_curves_aupc(y, tmt, ice, unique_tmts, objective_weights,
names=None):
"""Calculates optimal treatments and returns erupt
Args:
y (np.array): response variables
tmt (np.array): treatment randomly assigned to observation
ice (np.array): 3-d array of counterfactuals.
num_tmts x num_observations x num_responses
objective_weights (np.array): list of weights to maximize in objective
function
names (list of str): names of response variables
Returns:
Dataframe of ERUPT metrics for each response variable for a given set
of objective weights
"""
all_erupts = []
all_distributions = []
observation_weights = get_weights(tmt)
for obj_weight in objective_weights:
optim_tmt = get_best_tmts(obj_weight, ice, unique_tmts)
random_tmt = optim_tmt.copy()[
np.random.choice(
len(optim_tmt),
len(optim_tmt),
replace=False)]
str_obj_weight = ','.join([str(q) for q in obj_weight])
erupts = erupt(y, tmt, optim_tmt, weights=observation_weights,
names=names)
erupts_random = erupt(y, tmt, random_tmt, weights=observation_weights,
names=names)
erupts_random['weights'] = str_obj_weight
erupts['weights'] = str_obj_weight
erupts_random['assignment'] = 'random'
erupts['assignment'] = 'model'
erupts = pd.concat([erupts, erupts_random], axis=0)
dists =
|
pd.DataFrame(optim_tmt)
|
pandas.DataFrame
|
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
from fatetrack_connections import buildFeatureFrame, buildOffsetFrame, generateCandidates, generateLinks, DivSimScore, DivSetupScore, DivisionCanditates, UpdateConnectionsDiv, TranslationTable, SolveMinCostTable, ReviewCostTable
def TranslateConnections(ConnectionTable, TranslationTable, timepoint, preference = "Master_ID"):
subTranslationTable_0 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_0['slabel_t0'] = subTranslationTable_0['slabel']
subTranslationTable_1 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_1['slabel_t1'] = subTranslationTable_1['slabel']
merge_0 = pd.merge(ConnectionTable, subTranslationTable_0, on="slabel_t0")
merge = pd.merge(merge_0, subTranslationTable_1, on="slabel_t1")
pref = str(preference)
result = merge.loc[:,[pref+"_x",pref+"_y"]]
result = result.drop_duplicates()
result = result.dropna(thresh=1)
result = result.reset_index(drop=True)
result = result.rename(columns = {(pref+"_x") : (pref+"_"+str(timepoint)), (pref+"_y") : (pref+"_"+str(timepoint+1))})
return(result)
def RajTLG_wrap(filename_t0, filename_t1,timepoint,ConnectionTable,TranslationTable,path="./"):
frame0 = buildFeatureFrame(filename_t0,timepoint,pathtoimage=path);
frame1 = buildFeatureFrame(filename_t1,timepoint+1,pathtoimage=path);
frames = pd.concat([frame0,frame1])
frames["timepoint"] = frames["time"]
InfoDF = pd.merge(frames,TranslationTable, on=['label','timepoint'])
RajTLG_translation = TranslateConnections(ConnectionTable=ConnectionTable, TranslationTable=TranslationTable, timepoint=timepoint, preference="RajTLG_ID")
RajTLGFrame = pd.DataFrame()
if (timepoint == 0):
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-0"])
tmpParent = "NaN"
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-0"])
tmpParent = int(RajTLG_translation.loc[RajTLG_translation["RajTLG_ID"+"_"+str(timepoint+1)] == tmpID,
"RajTLG_ID"+"_"+str(timepoint)])
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
RajTLGFrame = RajTLGFrame.reset_index(drop=True)
RajTLGFrame = RajTLGFrame.rename(columns={0:"pointID", 1:"frameNumber",
2:"xCoord",3:"yCoord",4:"parentID"})
RajTLGFrame["annotation"] = "none"
#RajTLGFrame.to_csv(outfilename,index=False)
return(RajTLGFrame)
def MatchToGoldStd(FileCompare,FileGoldSTD):
GoldSTD = pd.read_csv(FileGoldSTD)
FateTrack = pd.read_csv(FileCompare)
GoldTranslationTable = pd.DataFrame()
for obj in range(FateTrack.shape[0]):
FateID = FateTrack.loc[obj,"pointID"]
frame = FateTrack.loc[obj,"frameNumber"]
xC = FateTrack.loc[obj,"xCoord"]
yC = FateTrack.loc[obj,"yCoord"]
tmpGold = GoldSTD.loc[GoldSTD["frameNumber"] == frame,]
tmpGold = tmpGold.reset_index(drop=True)
dist = np.array(np.sqrt((tmpGold["xCoord"]-xC)**2 + (tmpGold["yCoord"]-yC)**2))
GoldIndex = np.where(dist == dist.min())[0][0]
GoldID = tmpGold.loc[GoldIndex,"pointID"]
GoldTranslationTable = GoldTranslationTable.append(pd.DataFrame([GoldID,FateID]).T)
GoldTranslationTable = GoldTranslationTable.rename(columns={0:"GoldID",1:"FateID"})
return(GoldTranslationTable)
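# Minimal sketch (illustrative): MatchToGoldStd pairs each FateTrack object with the
# gold-standard point of the same frame that minimises Euclidean distance; toy numbers.
def _demo_nearest_match():
    gold_xy = np.array([[10.0, 10.0], [50.0, 40.0]])
    query_xy = np.array([12.0, 11.0])
    dist = np.sqrt(((gold_xy - query_xy) ** 2).sum(axis=1))
    return int(np.argmin(dist))  # 0 -> the first gold point is the match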
def CheckAccuracy(frame,FileCompare,FileGoldSTD,skip=0):
TranslateGold = MatchToGoldStd(FileCompare,FileGoldSTD)
GoldSTD = pd.read_csv(FileGoldSTD)
FateTrack =
|
pd.read_csv(FileCompare)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import os
from datetime import date
import pprint
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
import pandas as pd
from db_tools import otherm_db_reader
from utilities import misc_functions
mpl.use('Qt5Agg')
C_to_F = misc_functions.C_to_F
def get_mfr_data(heat_pump):
db_name = os.path.join('../temp_files/MfrPD.db')
mfr_db_con = sqlite3.connect(db_name)
sql = """SELECT * from '%s' """ % heat_pump
pd_table = pd.read_sql(sql, mfr_db_con)
mfr_db_con.close()
return pd_table
def mfr_data(HP_ids):
mfr_pl = get_mfr_data(HP_ids[0])
mfr_pl['EWT [F]'] = pd.Series(mfr_pl['EWT [F]']).fillna(method='ffill')
mfr_pl['Flow [GPM]'] =
|
pd.Series(mfr_pl['Flow [GPM]'])
|
pandas.Series
|
# -*- coding: utf-8 -*-
from os import listdir, remove
from os.path import join
from boto3 import Session
from moto import mock_s3
import numpy as np
import pandas as pd
import pytest
@pytest.fixture(scope="session")
def clean_receiving_dir():
# Clean receiving directory
for fname in listdir("receiving"):
if fname not in (".gitkeep", ".gitignore"):
remove(join("receiving", fname))
@pytest.fixture
def CAN_parquet_data():
columns = ["provider", "dt", "location_id", "location", "location_type", "variable_name",
"measurement", "unit", "age", "race", "ethnicity", "sex", "last_updated", "value"]
data = [
["cdc", "2021-01-01", "iso1:us#iso2:us-al#fips:01001", 1001, "county", "pcr_tests_positive",
"rolling_average_7_day", "percentage", "all", "all", "all", "all", "2021-01-02 19:00:00", 50.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-al#fips:01003", 1003, "county", "pcr_tests_positive",
"rolling_average_7_day", "percentage", "all", "all", "all", "all", "2021-01-02 19:00:00", 25.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-al#fips:01005", 1005, "county", "pcr_tests_positive",
"rolling_average_7_day", "percentage", "all", "all", "all", "all", "2021-01-02 19:00:00", 50.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-al#fips:01001", 1001, "county", "pcr_tests_total",
"rolling_average_7_day", "specimens", "all", "all", "all", "all", "2021-01-02 19:00:00", 10.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-al#fips:01003", 1003, "county", "pcr_tests_total",
"rolling_average_7_day", "specimens", "all", "all", "all", "all", "2021-01-02 19:00:00", 20.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-al#fips:01005", 1005, "county", "pcr_tests_total",
"rolling_average_7_day", "specimens", "all", "all", "all", "all", "2021-01-02 19:00:00", 20.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-pa#fips:42001", 42001, "county", "pcr_tests_positive",
"rolling_average_7_day", "percentage", "all", "all", "all", "all", "2021-01-02 19:00:00", 50.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-pa#fips:42003", 42003, "county", "pcr_tests_positive",
"rolling_average_7_day", "percentage", "all", "all", "all", "all", "2021-01-02 19:00:00", 20.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-pa#fips:42005", 42005, "county", "pcr_tests_positive",
"rolling_average_7_day", "percentage", "all", "all", "all", "all", "2021-01-02 19:00:00", 10.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-pa#fips:42001", 42001, "county", "pcr_tests_total",
"rolling_average_7_day", "specimens", "all", "all", "all", "all", "2021-01-02 19:00:00", 10.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-pa#fips:42003", 42003, "county", "pcr_tests_total",
"rolling_average_7_day", "specimens", "all", "all", "all", "all", "2021-01-02 19:00:00", 20.0],
["cdc", "2021-01-01", "iso1:us#iso2:us-pa#fips:42005", 42005, "county", "pcr_tests_total",
"rolling_average_7_day", "specimens", "all", "all", "all", "all", "2021-01-02 19:00:00", 10.0],
["SOME_SOURCE", "2021-01-15", "iso1:us#iso2:us-fl#fips:12093", 12093, "county", "SOME_OTHER_METRIC",
"SOME_MEASUREMENT", "SOME_UNITS", "all", "all", "all", "all", "2021-01-21 19:00:00", 123.0],
]
df_pq = pd.DataFrame(data, columns=columns)
return df_pq
@pytest.fixture
def CAN_county_testing_data():
columns = ["fips", "timestamp", "pcr_tests_positive", "pcr_tests_total", "pcr_positivity_rate"]
data = [
["01001", "2021-01-01", 5, 10, 0.5],
["01003", "2021-01-01", 5, 20, 0.25],
["01005", "2021-01-01", 10, 20, 0.5],
["42001", "2021-01-01", 5, 10, 0.5],
["42003", "2021-01-01", 4, 20, 0.2],
["42005", "2021-01-01", 1, 10, 0.1],
]
df = pd.DataFrame(data, columns=columns)
df["timestamp"] = pd.to_datetime(df["timestamp"])
p, n = df.pcr_positivity_rate, df.pcr_tests_total
df["se"] = np.sqrt(p * (1 - p) / n)
return df
@pytest.fixture
def CAN_state_testing_data():
columns = ["fips", "timestamp", "pcr_tests_positive", "pcr_tests_total", "pcr_positivity_rate"]
data = [
["al", "2021-01-01", 20, 50, 0.4],
["pa", "2021-01-01", 10, 40, 0.25]
]
df = pd.DataFrame(data, columns=columns)
df["timestamp"] = pd.to_datetime(df["timestamp"])
p, n = df.pcr_positivity_rate, df.pcr_tests_total
df["se"] = np.sqrt(p * (1 - p) / n)
return df
@pytest.fixture
def CAN_msa_testing_data():
columns = ["fips", "timestamp", "pcr_tests_positive", "pcr_tests_total", "pcr_positivity_rate"]
data = [
["19300", "2021-01-01", 5, 20, 0.25],
["23900", "2021-01-01", 5, 10, 0.5],
["33860", "2021-01-01", 5, 10, 0.5],
["38300", "2021-01-01", 5, 30, 5 / 30],
]
df = pd.DataFrame(data, columns=columns)
df["timestamp"] = pd.to_datetime(df["timestamp"])
p, n = df.pcr_positivity_rate, df.pcr_tests_total
df["se"] = np.sqrt(p * (1 - p) / n)
return df
@pytest.fixture
def CAN_hrr_testing_data():
columns = ["fips", "timestamp", "pcr_tests_positive", "pcr_tests_total", "pcr_positivity_rate"]
data = [
["1", "2021-01-01", 0.195525, 0.391050, 0.5],
["134", "2021-01-01", 0.159989, 0.639958, 0.25],
["2", "2021-01-01", 9.743599, 19.487198, 0.5],
["351", "2021-01-01", 0.0145052, 0.145052, 0.1],
["352", "2021-01-01", 2.690298, 5.380595, 0.5],
["357", "2021-01-01", 4.985495, 29.854948, 0.166991],
["363", "2021-01-01", 2.309702, 4.619405, 0.5],
["6", "2021-01-01", 4.840011, 19.360042, 0.25],
["7", "2021-01-01", 5.060876, 10.121752, 0.5],
]
df = pd.DataFrame(data, columns=columns)
df["timestamp"] = pd.to_datetime(df["timestamp"])
p, n = df.pcr_positivity_rate, df.pcr_tests_total
df["se"] = np.sqrt(p * (1 - p) / n)
return df
@pytest.fixture
def CAN_hhs_testing_data():
columns = ["fips", "timestamp", "pcr_tests_positive", "pcr_tests_total", "pcr_positivity_rate"]
data = [
["3", "2021-01-01", 10, 40, 0.25],
["4", "2021-01-01", 20, 50, 0.4],
]
df = pd.DataFrame(data, columns=columns)
df["timestamp"] = pd.to_datetime(df["timestamp"])
p, n = df.pcr_positivity_rate, df.pcr_tests_total
df["se"] = np.sqrt(p * (1 - p) / n)
return df
@pytest.fixture
def CAN_nation_testing_data():
columns = ["fips", "timestamp", "pcr_tests_positive", "pcr_tests_total", "pcr_positivity_rate"]
data = [
["us", "2021-01-01", 30, 90, 30 / 90],
]
df =
|
pd.DataFrame(data, columns=columns)
|
pandas.DataFrame
|
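# Each testing fixture above attaches a standard-error column via
# np.sqrt(p * (1 - p) / n), the binomial-proportion standard error for a
# positivity rate p estimated from n tests. A self-contained sketch of the
# same computation on made-up counts:
import numpy as np
import pandas as pd
demo = pd.DataFrame({"pcr_tests_positive": [5, 10], "pcr_tests_total": [20, 40]})
demo["pcr_positivity_rate"] = demo["pcr_tests_positive"] / demo["pcr_tests_total"]
p, n = demo["pcr_positivity_rate"], demo["pcr_tests_total"]
demo["se"] = np.sqrt(p * (1 - p) / n)  # sqrt(p(1-p)/n)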
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def oracle_url() -> str:
conn = os.environ["ORACLE_URL"]
return conn
@pytest.mark.xfail
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_on_non_select(oracle_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
read_sql(oracle_url, query)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_complex_join(oracle_url: str) -> None:
query = "SELECT a.test_int, b.test_date, c.test_num_int FROM test_table a left join test_types b on a.test_int = b.test_num_int cross join (select test_num_int from test_types) c where c.test_num_int < 3"
df = read_sql(oracle_url, query)
df = df.sort_values("TEST_INT").reset_index(drop=True)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 5, 2333], dtype="Int64"),
"TEST_DATE": pd.Series(
["2019-05-21", None, None, "2020-05-21", "2020-05-21", None],
dtype="datetime64[ns]",
),
"TEST_NUM_INT": pd.Series([1, 1, 1, 1, 1, 1], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_aggregation(oracle_url: str) -> None:
query = "select avg(test_int), test_char from test_table group by test_char"
df = read_sql(oracle_url, query)
df = df.sort_values("AVG(TEST_INT)").reset_index(drop=True)
expected = pd.DataFrame(
data={
"AVG(TEST_INT)": pd.Series([1, 2, 5, 1168.5], dtype="float64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", "str05", None], dtype="object"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_partition_on_aggregation(oracle_url: str) -> None:
query = "select sum(test_int) cid, test_char from test_table group by test_char"
df = read_sql(oracle_url, query, partition_on="cid", partition_num=3)
df = df.sort_values("CID").reset_index(drop=True)
expected = pd.DataFrame(
index=range(4),
data={
"CID": pd.Series([1, 2, 5, 2337], dtype="float64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", "str05", None], dtype="object"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_aggregation2(oracle_url: str) -> None:
query = "select DISTINCT(test_char) from test_table"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_CHAR": pd.Series(["str05", "str1 ", "str2 ", None], dtype="object"),
},
)
df.sort_values(by="TEST_CHAR", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_partition_on_aggregation2(oracle_url: str) -> None:
query = "select MAX(test_int) MAX, MIN(test_int) MIN from test_table"
df = read_sql(oracle_url, query, partition_on="MAX", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"MAX": pd.Series([2333], dtype="float64"),
"MIN": pd.Series([1], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_manual_partition(oracle_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(oracle_url, query=queries)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333, 4, 5], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, None, "str05"], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, None, -4.44, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_limit_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table where rownum <= 3"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", None], dtype="object"),
"TEST_FLOAT": pd.Series([1.1, 2.2, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_limit_large_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table where rownum < 10"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333, 4, 5], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, None, "str05"], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, None, -4.44, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_range=(0, 5001),
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_without_partition_range(oracle_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 1"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2], dtype="Int64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 "], dtype="object"),
"TEST_FLOAT": pd.Series([1.1, 2.2], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_and_selection(oracle_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_range=(1, 2333),
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_and_spja(oracle_url: str) -> None:
query = "select test_table.test_int cid, SUM(test_types.test_num_float) sfloat from test_table, test_types where test_table.test_int=test_types.test_num_int group by test_table.test_int"
df = read_sql(oracle_url, query, partition_on="cid", partition_num=2)
expected = pd.DataFrame(
data={
"CID":
|
pd.Series([1, 5], dtype="Int64")
|
pandas.Series
|
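# The Oracle tests above all follow one pattern: build an expected frame with
# explicit (often nullable) dtypes and compare it to the query result with
# assert_frame_equal. A database-free sketch of that comparison, assuming only
# pandas itself:
import pandas as pd
from pandas.testing import assert_frame_equal
result = pd.DataFrame({"TEST_INT": pd.array([1, 2, None], dtype="Int64")})
expected = pd.DataFrame({"TEST_INT": pd.Series([1, 2, None], dtype="Int64")})
assert_frame_equal(result, expected, check_names=True)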
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
|
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])
|
pandas.compat.OrderedDict
|
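# The constructor tests above exercise DataFrame construction from dicts and
# lists of dicts; the core behaviour is that keys missing from a row simply
# become NaN. A small self-contained sketch of that behaviour:
import numpy as np
import pandas as pd
rows = [{"a": 1.5, "b": 3.0}, {"a": 2.5}]          # second row has no "b"
demo = pd.DataFrame(rows)
assert np.isnan(demo.loc[1, "b"])                  # missing key -> NaN
expected = pd.DataFrame({"a": [1.5, 2.5], "b": [3.0, np.nan]})
pd.testing.assert_frame_equal(demo, expected)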
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
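# Hedged sketch of the behaviour test_inconsistent_return_type asserts above:
# when the applied function returns None for some group, that group's entry in
# the combined result comes back as a missing value. Names are illustrative.
import pandas as pd
demo = pd.DataFrame({"A": ["x", "x", "y"], "B": [1.0, 2.0, 3.0]})
out = demo.groupby("A").apply(lambda g: None if g.name == "y" else g["B"].iloc[0])
assert pd.isna(out.loc["y"]) and out.loc["x"] == 1.0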
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
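# Hedged illustration (not part of the suite above): with as_index=False the
# grouping column survives as an ordinary column instead of becoming the
# index; the frame and column names here are made up for the sketch.
def _sketch_as_index_false():
    import pandas as pd
    df = pd.DataFrame({"a": [0, 0, 1], "b": [1.0, 2.0, 3.0]})
    out = df.groupby("a", as_index=False).sum()
    assert list(out.columns) == ["a", "b"]   # "a" is still a column
    assert out["a"].tolist() == [0, 1]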
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
nan_df = DataFrame(
{"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
)
assert nan_df["nan"].dtype == "float64"
assert nan_df["nat"].dtype == "datetime64[ns]"
for key in ["nan", "nat"]:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
with pytest.raises(KeyError, match=r"^nan$"):
grouped.get_group(np.nan)
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
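# Hedged mini-example of the NaT/NaN-exclusion rule exercised above: rows
# whose group key is missing are dropped from the group mapping entirely.
def _sketch_nat_excluded():
    import pandas as pd
    df = pd.DataFrame({"dt": [pd.Timestamp("2013-01-01"), pd.NaT], "v": [1, 2]})
    assert list(df.groupby("dt").groups) == [pd.Timestamp("2013-01-01")]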
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
tmp = d.groupby(["group"]).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
A = np.arange(25000)
df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
left = df.groupby(["A", "B", "C", "D"]).sum()
right = df.groupby(["D", "C", "B", "A"]).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame(
{
"a": ["foo", "bar", "baz"],
"b": [3, 2, 1],
"c": [0, 1, 2],
"d": np.random.randn(3),
}
)
tups = [tuple(row) for row in df[["a", "b", "c"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["a", "b", "c"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = [tuple(row) for row in df[["c", "a", "b"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["c", "a", "b"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = [tuple(x) for x in df[["b", "c", "a"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["b", "c", "a"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame(
{"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
)
grouped = df.groupby(["a", "b"])["d"]
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = [tuple(row) for row in df[keys].values]
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in expected.items():
assert result[k] == v
_check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
result = df.groupby("key").apply(lambda x: x)
tm.assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_frame_equal(result, expected)
grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = [group.sort_values()[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_series_equal(result, expected)
def test_no_nonsense_name(float_frame):
# GH #995
s = float_frame["C"].copy()
s.name = None
result = s.groupby(float_frame["A"]).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x["test"] = 0
x["fl"] = [1.3, 1.5, 1.6]
grouped = x.groupby("test")
result = grouped.agg({"fl": "sum", 2: "size"})
assert result["fl"].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {"max": group.max(), "min": group.min()}
def g(group):
return Series({"max": group.max(), "min": group.min()})
result = df.groupby("A")["C"].apply(f)
expected = df.groupby("A")["C"].apply(g)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
def test_set_group_name(df, grouper):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
grouped = df.groupby(grouper)
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({"C": freduce, "D": freduce})
grouped.transform(f)
grouped["C"].apply(f)
grouped["C"].aggregate(freduce)
grouped["C"].aggregate([freduce, foo])
grouped["C"].transform(f)
def test_group_name_available_in_inference_pass():
# gh-15062
df = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
names = []
def f(group):
names.append(group.name)
return group.copy()
df.groupby("a", sort=False, group_keys=False).apply(f)
expected_names = [0, 1, 2]
assert names == expected_names
def test_no_dummy_key_names(df):
# see gh-1291
result = df.groupby(df["A"].values).sum()
assert result.index.name is None
result = df.groupby([df["A"].values, df["B"].values]).sum()
assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(
levels=[[1, 2], [1, 2]],
codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=["a", "b"],
)
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(
levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
)
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=["a", "b"], sort=False).first()
tm.assert_series_equal(result, mseries_result)
result = mseries.groupby(level=["a", "b"], sort=True).first()
tm.assert_series_equal(result, mseries_result.sort_index())
def test_groupby_reindex_inside_function():
periods = 1000
ind = date_range(start="2012/1/1", freq="5min", periods=periods)
df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({"high": agg_before(11, np.max)})
closure_good = grouped.agg({"high": agg_before(11, np.max, True)})
tm.assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
# GH9049
df = DataFrame(
{
"group1": ["a", "a", "a", "b"],
"group2": ["c", "c", "d", "c"],
"value": [1, 1, 1, 5],
}
)
df = df.set_index(["group1", "group2"])
df_grouped = df.groupby(level=["group1", "group2"], sort=True)
res = df_grouped.agg("sum")
idx = MultiIndex.from_tuples(
[("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
)
exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted():
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
)
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
not_lexsorted_df = not_lexsorted_df.pivot_table(
index="a", columns=["b", "c"], values="d"
)
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby("a").mean()
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.groupby("a").mean()
tm.assert_frame_equal(expected, result)
# a transforming function should work regardless of sort
# GH 14776
df = DataFrame(
{"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]}
).set_index(["x", "y"])
assert not df.index.is_lexsorted()
for level in [0, 1, [0, 1]]:
for sort in [False, True]:
result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)
expected = df
tm.assert_frame_equal(expected, result)
result = (
df.sort_index()
.groupby(level=level, sort=sort)
.apply(DataFrame.drop_duplicates)
)
expected = df.sort_index()
tm.assert_frame_equal(expected, result)
def test_index_label_overlaps_location():
# checking we don't have any label/location confusion in the
# wake of GH5375
df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
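# Hedged sketch of the label-vs-position distinction checked above: `filter`
# selects rows by the position of the external grouping key even when the
# index labels collide with positional values.
def _sketch_filter_ignores_labels():
    import pandas as pd
    df = pd.DataFrame({"v": list("ABCD")}, index=[2, 0, 2, 1])
    kept = df.groupby(list("abbb")).filter(lambda x: len(x) > 2)
    assert kept["v"].tolist() == ["B", "C", "D"]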
def test_transform_doesnt_clobber_ints():
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({"a": x // 2, "b": 2.0 * x, "c": 3.0 * x})
df2 = DataFrame({"a": x // 2 * 1.0, "b": 2.0 * x, "c": 3.0 * x})
gb = df.groupby("a")
result = gb.transform("mean")
gb2 = df2.groupby("a")
expected = gb2.transform("mean")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sort_column",
["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]],
)
@pytest.mark.parametrize(
"group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]]
)
def test_groupby_preserves_sort(sort_column, group_column):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
df = DataFrame(
{
"int_groups": [3, 1, 0, 1, 0, 3, 3, 3],
"string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"],
"ints": [8, 7, 4, 5, 2, 9, 1, 1],
"floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
"strings": ["z", "d", "a", "e", "word", "word2", "42", "47"],
}
)
# Try sorting on different types and with different group types
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
tm.assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
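# Hedged illustration of the property above: inside each group, rows keep the
# order of the (pre-sorted) original frame regardless of the key dtype.
def _sketch_groupby_preserves_sort():
    import pandas as pd
    df = pd.DataFrame({"g": [1, 0, 1, 0], "v": [3, 1, 2, 0]}).sort_values("v")
    for _, grp in df.groupby("g"):
        assert grp["v"].is_monotonic_increasing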
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame(
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1)
tm.assert_frame_equal(result, expected)
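# Small hedged sketch of the behaviour replicated above: a row whose group
# key is missing gets label -1 internally and comes back as NaN from a
# grouped shift.
def _sketch_shift_with_null_key():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({"key": [1.0, 1.0, np.nan], "val": [10.0, 20.0, 30.0]})
    shifted = df.groupby("key")["val"].shift(-1)
    assert shifted.iloc[0] == 20.0    # next row within the key==1.0 group
    assert np.isnan(shifted.iloc[2])  # missing key -> not in any group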
def test_group_shift_with_fill_value():
# GH #24128
n_rows = 24
df = DataFrame(
[(i % 12, i % 3, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1, fill_value=0)[["Z"]]
tm.assert_frame_equal(result, expected)
def test_group_shift_lose_timezone():
# GH 30134
now_dt = pd.Timestamp.utcnow()
df = DataFrame({"a": [1, 1], "date": now_dt})
result = df.groupby("a").shift(0).iloc[0]
expected = Series({"date": now_dt}, name=result.name)
tm.assert_series_equal(result, expected)
def test_pivot_table_values_key_error():
# This test is designed to replicate the error in issue #14938
df = pd.DataFrame(
{
"eventDate": pd.date_range(datetime.today(), periods=20, freq="M").tolist(),
"thename": range(0, 20),
}
)
df["year"] = df.set_index("eventDate").index.year
df["month"] = df.set_index("eventDate").index.month
with pytest.raises(KeyError, match="'badname'"):
df.reset_index().pivot_table(
index="year", columns="month", values="badname", aggfunc="count"
)
def test_empty_dataframe_groupby():
# GH8093
df = DataFrame(columns=["A", "B", "C"])
result = df.groupby("A").sum()
expected = DataFrame(columns=["B", "C"], dtype=np.float64)
expected.index.name = "A"
tm.assert_frame_equal(result, expected)
def test_tuple_as_grouping():
# https://github.com/pandas-dev/pandas/issues/18314
df = pd.DataFrame(
{
("a", "b"): [1, 1, 1, 1],
"a": [2, 2, 2, 2],
"b": [2, 2, 2, 2],
"c": [1, 1, 1, 1],
}
)
with pytest.raises(KeyError, match=r"('a', 'b')"):
df[["a", "b", "c"]].groupby(("a", "b"))
result = df.groupby(("a", "b"))["c"].sum()
expected = pd.Series([4], name="c", index=pd.Index([1], name=("a", "b")))
tm.assert_series_equal(result, expected)
def test_tuple_correct_keyerror():
# https://github.com/pandas-dev/pandas/issues/18798
df = pd.DataFrame(
1, index=range(3), columns=pd.MultiIndex.from_product([[1, 2], [3, 4]])
)
with pytest.raises(KeyError, match=r"^\(7, 8\)$"):
df.groupby((7, 8)).mean()
def test_groupby_agg_ohlc_non_first():
# GH 21716
df = pd.DataFrame(
[[1], [1]],
columns=["foo"],
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
expected = pd.DataFrame(
[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
columns=pd.MultiIndex.from_tuples(
(
("foo", "sum", "foo"),
("foo", "ohlc", "open"),
("foo", "ohlc", "high"),
("foo", "ohlc", "low"),
("foo", "ohlc", "close"),
)
),
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
result = df.groupby(pd.Grouper(freq="D")).agg(["sum", "ohlc"])
tm.assert_frame_equal(result, expected)
def test_groupby_multiindex_nat():
# GH 9236
values = [
(pd.NaT, "a"),
(datetime(2012, 1, 2), "a"),
(datetime(2012, 1, 2), "b"),
(datetime(2012, 1, 3), "a"),
]
mi = pd.MultiIndex.from_tuples(values, names=["date", None])
ser = pd.Series([3, 2, 2.5, 4], index=mi)
result = ser.groupby(level=1).mean()
expected = pd.Series([3.0, 2.5], index=["a", "b"])
tm.assert_series_equal(result, expected)
def test_groupby_empty_list_raises():
# GH 5289
values = zip(range(10), range(10))
df = DataFrame(values, columns=["apple", "b"])  # completion (api: pandas.DataFrame)
"""Module contains test cases for points.py module."""
import unittest
from pyroglancer.points import create_pointinfo, upload_points, annotate_points
from pyroglancer.layers import get_ngserver, _handle_ngdimensions
from pyroglancer.localserver import startdataserver, closedataserver
from pyroglancer.ngviewer import openviewer, closeviewer
import os
import pandas as pd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add a common viewer and dataserver (specific port for Travis) for each test module.
closeviewer()
closedataserver()
startdataserver(port=8004) # start dataserver..
openviewer(headless=True) # open ngviewer
# def setup_module(module):
# """Start all servers."""
# # Add a common viewer, dataserver for the whole serie of test..
# startdataserver() # start dataserver..
# openviewer(headless=True) # open ngviewer
#
#
# def teardown_module(module):
# """Stop all servers."""
# # Stop all viewers..
# closedataserver()
# closeviewer()
class Testpoints(unittest.TestCase):
"""Test pyroglancer.points."""
# def setUp(self):
# """Perform set up."""
# super(Testsynapses, self).setUp()
#
# def tearDown(self):
# """Perform tearing down."""
# super(Testsynapses, self).tearDown()
def test_create_pointinfo(self):
"""Check if the point info is stored."""
layer_serverdir, layer_host = get_ngserver()
layer_kws = {}
layer_kws['ngspace'] = 'FAFB'
dimensions = _handle_ngdimensions(layer_kws)
create_pointinfo(dimensions, layer_serverdir, 'points')
status = os.path.isfile(os.path.join(
layer_serverdir, 'precomputed/points', 'info'))
assert status
def test_put_pointfile(self):
"""Check if the point file is stored."""
layer_serverdir, layer_host = get_ngserver()
layer_kws = {}
layer_kws['ngspace'] = 'FAFB'
dimensions = _handle_ngdimensions(layer_kws)
layer_name = 'points'
points_path = create_pointinfo(dimensions, layer_serverdir, layer_name)
location_data = [{'x': 5, 'y': 10, 'z': 20}, {'x': 15, 'y': 25, 'z': 30}]
points = pd.DataFrame(location_data)  # completion (api: pandas.DataFrame)
from functools import reduce
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def mergeAllTime(dfs:list[pd.DataFrame]):
''' Layer 1 - not useful?
Combines multiple multicolumned dataframes.
To support disparate frequencies, the outer join fills missing
values with the previous value.
This probably isn't needed anymore; it may only matter for the
live update models and stream later on (see the sketch just
after this function's dataset row).
'''
if isinstance(dfs, pd.DataFrame):
return dfs
if len(dfs) == 0:
return None
if len(dfs) == 1:
return dfs[0]
for df in dfs:
df.index = pd.to_datetime(df.index)  # completion (api: pandas.to_datetime)
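# Hedged sketch (an assumed helper, not from the source) of the merge the
# docstring above describes: outer-join several frames on their index and
# forward-fill the gaps created by mismatched frequencies. Assumes column
# names do not overlap across frames.
def merge_all_time_sketch(dfs):
    from functools import reduce
    import pandas as pd
    if isinstance(dfs, pd.DataFrame):
        return dfs
    joined = reduce(lambda a, b: a.join(b, how="outer"), dfs)
    return joined.sort_index().ffill()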
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import sys
import unittest
import numpy as np
import pandas as pd
from numba.typed import List
from sklearn.datasets import make_gaussian_quantiles
from sklearn.metrics import classification_report as sklearn_classification_report
from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from .classification import (
create_numba_list,
classification_report,
metrics_from_confusion_matrix,
confusion_matrix,
plot_classification_report,
)
from .regression import regression_report
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
RANDOM_SEED = 27
class TestMultiClassClassification:
@classmethod
def setup_class(cls):
""""""
# Construct dataset
X1, y1 = make_gaussian_quantiles(
cov=3.0,
n_samples=10000,
n_features=2,
n_classes=5,
random_state=RANDOM_SEED,
)
X1 = pd.DataFrame(X1)
cls.label = pd.Series(y1).to_numpy()
np.random.seed(RANDOM_SEED)
cls.predicted = pd.Series(y1)[
np.random.choice(len(y1), size=len(y1), replace=False)
].to_numpy()
cls.labels = create_numba_list(np.unique(cls.label))
cls.n_labels = len(cls.labels)
df = pd.DataFrame({"label": cls.label, "predicted": cls.predicted})
cls.df_np = df.to_numpy()
cls.matrix = confusion_matrix(
cls.df_np[:, 0], cls.df_np[:, 1], labels=cls.labels, n_labels=cls.n_labels
)
def test_confusion_matrix(self):
sklearn_matrix = sklearn_confusion_matrix(self.label, self.predicted)
assert np.array_equal(self.matrix, sklearn_matrix)
def test_metrics(self):
"""
put results into identically-formatted containers to be able to use the == operator
"""
report_sklearn = sklearn_classification_report(
self.label, self.predicted, output_dict=True
)
formatted_report = List()
for key, val in report_sklearn.items():
if key not in ["accuracy", "macro avg", "weighted avg"]:
formatted_report.append(val["precision"])
formatted_report.append(val["recall"])
formatted_report.append(val["f1-score"])
report = metrics_from_confusion_matrix(self.matrix, self.labels)
assert formatted_report == report
def test_confidence_intervals(self):
"""
validate that the lower is always <= upper
and that the point estimate lies within
"""
np.random.seed(RANDOM_SEED)
report = classification_report(self.label, self.predicted)
assert (report["lower"] <= report["upper"]).all()
class TestBinaryClassification:
@classmethod
def setup_class(cls):
""""""
# Construct dataset
X1, y1 = make_gaussian_quantiles(
cov=3.0,
n_samples=10000,
n_features=2,
n_classes=2,
random_state=RANDOM_SEED,
)
cls.label = pd.Series(y1).to_numpy()
np.random.seed(RANDOM_SEED)
cls.predicted = pd.Series(y1)  # completion (api: pandas.Series)
import os
import sys
import pandas
os.makedirs("./filelists_incucyte/", exist_ok=True)
ROOT_FOLDERNAME_WITHIN_SUBMISSION_SYSTEM = ""
#FILENAME = "20210920 Overview CG plates and compounds.xlsx"
#FILENAME = "20220121 Overview CG plates and compounds _consolidated RTG.xlsx"
FILENAME = sys.argv[1]
df_batches = pandas.read_excel(FILENAME, sheet_name="compound batches")
df_compounds = pandas.read_excel(FILENAME, sheet_name="compounds")
df_identifier = df_batches.merge(df_compounds, on="compound ID", how="left", validate="m:1")
df_experiments = pandas.read_excel(FILENAME, sheet_name="experiments")
## do only cv
df_experiments = df_experiments[df_experiments["experiment ID"].str.contains("cv")]
## store expanded compound maps
print("expanding the compound maps...")
compound_map_dict = {}
for see, _ in df_experiments.groupby("compound map see corresponding excel table"):
print(f"Checking the compound map '{see}'...")
df_compound_map = pandas.read_excel(FILENAME, sheet_name=f"compound map {see}")
## expand with lookup-ed ID
for i, s in df_compound_map.iterrows():
#print(i)
#print(s.compound_name)
column_name_for_identification = "compound batch ID"
if pandas.isna(s[column_name_for_identification]):
continue
result = df_identifier.query("`compound batch ID` == '{compound_name}'".format(compound_name= s[column_name_for_identification]))
if type(s[column_name_for_identification]) == int:
result = df_identifier.query("`compound batch ID` == {compound_name}".format(compound_name= s[column_name_for_identification]))
#print(result)
#assert len(result) == 1, (s, result)
if len(result) == 1:
#print(dff.loc[i])
for col in result.columns:
df_compound_map.loc[i, col] = result.squeeze()[col]
else:
print("ERROR: couldn't lookup the compound name '{compound_name}'".format(compound_name= s[column_name_for_identification]))
compound_map_dict.update( {see: df_compound_map})
df_imagings = pandas.read_excel(FILENAME, sheet_name="imaging campaigns")  # completion (api: pandas.read_excel)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
import json
import argparse
import os
import sys
from typing import List, Dict, Any
from ast import literal_eval
"""
Plot heatmap or violinplot of tactics and vendors
"""
CPE_ID_NORB_ID_PATH = "NORB/original_id_to_norb_id/cpe_id_norb_id.json"
def make_intensity_array(tactics, vendors, tactic_ids, tactic_vendor_products):
intensity_array = np.zeros((len(vendors),len(tactics)))
for tactic in tactic_vendor_products:
for vendor in tactic_vendor_products[tactic]:
products = tactic_vendor_products[tactic][vendor]
num_products = len(products)
intensity_array[vendors.index(vendor)][tactic_ids.index(tactic)] = num_products
return intensity_array
def norb_id_to_cpe_id(NORB_folder_path):
NORB_cpe_id_path = os.path.join(NORB_folder_path, CPE_ID_NORB_ID_PATH)
with open(NORB_cpe_id_path) as f:
cpe_id_norb_id = json.load(f)
norb_id_to_cpe_id = dict()
for cpe_id, norb_id in cpe_id_norb_id.items():
norb_id_to_cpe_id[f"cpe_{norb_id}"] = cpe_id
return norb_id_to_cpe_id
def analyze_tactic_result(vendors, tactic_result, norb_id_to_cpe_id):
df = pd.read_csv(tactic_result, usecols=["tactic", "cpe"])
tactic_vendor_products = dict()
for row_index in df.index[:-1]:
tactic = literal_eval(df["tactic"][row_index]).pop()
cpes = set()
entry = df["cpe"][row_index]
if entry != "set()":
cpes = literal_eval(entry)
cpe_ids = find_cpe_ids(cpes, norb_id_to_cpe_id)
vendor_products = find_vendor_cpes(vendors, cpe_ids)
tactic_vendor_products[tactic] = vendor_products
return tactic_vendor_products
def find_vendor_cpes(vendors, cpe_ids):
vendor_products = dict()
for vendor in vendors:
vendor_products[vendor] = set()
for cpe_id in cpe_ids:
parsed = cpe_id.split(':', 5)
vendor = parsed[3]
product = parsed[4]
if vendor in vendors:
vendor_products[vendor].add(product)
return vendor_products
def find_cpe_ids(cpes, norb_id_to_cpe_id):
cpe_ids = set()
for norb_id in cpes:
cpe_ids.add(norb_id_to_cpe_id[norb_id])
return cpe_ids
def make_heat_map(tactics, vendors, tactic_ids, tactic_search_result, norb_id_to_cpe_id, save_path=None):
plt.rc('font', size=12)
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
tactic_vendor_products = analyze_tactic_result(vendors, tactic_search_result, norb_id_to_cpe_id)
intensity_array = make_intensity_array(tactics, vendors, tactic_ids, tactic_vendor_products)
labels = np.asarray([[int(intensity_array[row, col]) for col in range(len(tactics))] for row in range(len(vendors))])
comma_fmt = FuncFormatter(lambda x, p: format(int(x), ','))
heatmap = sns.heatmap(intensity_array, cmap='magma_r', xticklabels=tactics, yticklabels=vendors, annot=labels, fmt='', annot_kws={'size':10}, cbar_kws={'format':comma_fmt})
# heatmap.set_xticklabels(heatmap.get_xticklabels(), rotation=45, horizontalalignment='right')
for t in heatmap.texts:
t.set_text('{:,d}'.format(int(t.get_text())))
heatmap.set(xlabel="Tactics", ylabel="Vendors")
heatmap.tick_params(which='both', width=2)
heatmap.tick_params(which='major', length=7)
heatmap.tick_params(which='minor', length=4)
b, t = plt.ylim()
b += 0.5
t -= 0.5
plt.ylim(b, t)
plt.tight_layout()
fig = heatmap.get_figure()
if save_path is None:
plt.show()
else:
fig.savefig(save_path, dpi=400)
def cve_to_risk(cve_summary):
cve_to_risk_dict = dict()
df = pd.read_csv(cve_summary, usecols=["node_name", "metadata"])
for row_index in df.index:
cve = df["node_name"][row_index]
metadata = literal_eval(df["metadata"][row_index])
risk_score = metadata["weight"]
cve_to_risk_dict[cve] = risk_score
return cve_to_risk_dict
# Violin plots for vendor applications that reach a specific tactic
def max_cve_risk_violin_tactic_helper(tactic, vendors, vendor_search_result_folder, cve_to_risk_dict,
tactic_search_result, norb_id_to_cpe_id):
tactic_vendor_products = analyze_tactic_result(vendors, tactic_search_result, norb_id_to_cpe_id)
vendor_products = tactic_vendor_products[tactic] # dict of vendors to set of their products
vendor_to_risk_score = dict()
for vendor in vendors:
VENDOR_SEARCH_RESULT_PATH = os.path.join(vendor_search_result_folder, "search_result_" + vendor + ".csv")
df = pd.read_csv(VENDOR_SEARCH_RESULT_PATH, usecols=["tactic", "cve"])
risk_score_list = []
for row_index in df.index[:-1]:
tactics = set()
tactics_entry = df["tactic"][row_index]
if tactics_entry != "set()":
tactics = literal_eval(tactics_entry)
cves = set()
cves_entry = df["cve"][row_index]
if cves_entry != "set()":
cves = literal_eval(cves_entry)
if tactic in tactics:
max_cve_risk = None
for cve in cves:
cve_risk = cve_to_risk_dict[cve]
if max_cve_risk is None:
max_cve_risk = cve_risk
elif max_cve_risk < cve_risk:
max_cve_risk = cve_risk
risk_score_list.append(max_cve_risk)
vendor_to_risk_score[vendor] = risk_score_list
return vendor_to_risk_score
def max_cve_risk_violin_tactic(tactic_names, tactic_ids, vendors, vendor_search_result_folder,
cve_to_risk_dict, tactic_search_result, norb_id_to_cpe_id, save_path=None, stick=False):
tactic1_name, tactic2_name = tactic_names
tactic1_id, tactic2_id = tactic_ids
plt.rcParams['figure.figsize'] = (20.0, 12.0)
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
sns.set(font_scale=3)
sns.set_style("ticks")
vendor_to_risk_score_dict1 = max_cve_risk_violin_tactic_helper(tactic1_id, vendors, vendor_search_result_folder,
cve_to_risk_dict, tactic_search_result, norb_id_to_cpe_id)
vendor_to_risk_score_dict2 = max_cve_risk_violin_tactic_helper(tactic2_id, vendors, vendor_search_result_folder,
cve_to_risk_dict, tactic_search_result, norb_id_to_cpe_id)
combined_data = {'Tactic': [], 'Vendor': [], 'CVSS Scores': []}
for vendor, risk_score_list in vendor_to_risk_score_dict1.items():
for risk_score in risk_score_list:
combined_data['Tactic'].append(tactic1_name)
combined_data['Vendor'].append(vendor)
combined_data['CVSS Scores'].append(risk_score)
for vendor, risk_score_list in vendor_to_risk_score_dict2.items():
for risk_score in risk_score_list:
combined_data['Tactic'].append(tactic2_name)
combined_data['Vendor'].append(vendor)
combined_data['CVSS Scores'].append(risk_score)
vendor_to_risk_score_df = pd.DataFrame(combined_data)
if stick:
p = sns.violinplot(data=vendor_to_risk_score_df, x='Vendor', y='CVSS Scores', hue='Tactic', split=True, inner="stick")
else:
p = sns.violinplot(data=vendor_to_risk_score_df, x='Vendor', y='CVSS Scores', hue='Tactic', split=True)
p.tick_params(which='both', width=4)
p.tick_params(which='major', length=14)
plt.legend(fontsize=30)
p.set(xlabel="Vendors", ylabel="CVSS Scores")
plt.xticks(rotation=45)
plt.ylim(-1.5, 11.5)
plt.tight_layout()
if save_path is None:
plt.show()
else:
plt.savefig(save_path, dpi=400)
# Violin plots for vendor applications that reach all of their tactics
def max_cve_risk_violin_helper(vendors, vendor_search_result_folder, cve_to_risk_dict):
vendor_to_risk_score = dict()
for vendor in vendors:
VENDOR_SEARCH_RESULT_PATH = os.path.join(vendor_search_result_folder, "search_result_" + vendor + ".csv")
df = pd.read_csv(VENDOR_SEARCH_RESULT_PATH, usecols=["cve"])  # completion (api: pandas.read_csv)
import requests
import json
import datetime
import sys
from dateutil.parser import parse as to_datetime
try:
import pandas as pd
except:
pass
from pyteamup.utils.utilities import *
from pyteamup.utils.constants import *
from pyteamup.Event import Event
class Calendar:
def __init__(self, cal_id, api_key):
self.__calendar_id = cal_id
self.__api_key = api_key
self.__cal_base = f'/{cal_id}'
self.__token_str = f'?_teamup_token={self.api_key}'
self.__subcalendars = None
self.__valid_api = None
self.__configuration = None
self._base_url = BASE_URL + self.__cal_base
self._event_collection_url = self._base_url + EVENTS_BASE + self.__token_str
self._subcalendars_url = self._base_url + SUBCALENDARS_BASE + self.__token_str
self._check_access_url = BASE_URL + CHECK_ACCESS_BASE + self.__token_str
self.events_json = None
if not self.valid_api:
raise Exception(f'Invalid Api Key: {self.api_key}')
def __str__(self):
return self.calendar_id
@property
def api_key(self):
return self.__api_key
@property
def calendar_id(self):
return self.__calendar_id
@property
def valid_api(self):
"""Makes a request to the calendar to see if the api is valid"""
if not self.__valid_api:
req = requests.get(self._check_access_url)
try:
check_status_code(req.status_code)
self.__valid_api = True
except:
self.__valid_api = False
return self.__valid_api
else:
return None
@property
def configuration(self):
if self.__configuration is None:
print('Fetching configuration')
req = requests.get(self._base_url + CONFIGURATION_BASE + self.__token_str)
check_status_code(req.status_code)
self.__configuration = json.loads(req.text)['configuration']
return self.__configuration
@property
def subcalendars(self):
if not self.__subcalendars:
print('Fetching Subcalendars')
req = requests.get(self._subcalendars_url)
check_status_code(req.status_code)
self.__subcalendars = json.loads(req.text)['subcalendars']
return self.__subcalendars
def clear_calendar_cache(self):
self.__subcalendars = None
self.__configuration = None
def get_event_collection(self, start_dt=None, end_dt=None, subcal_id=None, returnas='events', markdown=False):
"""
Method allows bulk fetching of events that fall between the provided time frame. If None is provided then
the current date -30 and +180 days is used.
:param start_dt: if set as None then set as today minus 30 days
:param end_dt: if left as None then set as today plus 180 days
:param subcal_id: optional str or list-like if a different calendar should be queried
:return: list of Event objects, DataFrame, or json dict of events, depending on `returnas` (see the usage sketch after this method)
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
if start_dt is None:
start_dt = datetime.date.today() - datetime.timedelta(30)
if end_dt is None:
end_dt = datetime.date.today() + datetime.timedelta(180)
subcal_par = ''
if subcal_id:
if isinstance(subcal_id, (list, tuple)):
for id in subcal_id:
subcal_par += f'&subcalendarId[]={id}'
else:
subcal_par = f'&subcalendarId[]={subcal_id}'
if markdown == True:
para_markdown = '&format[]=markdown'
else:
para_markdown = ''
parameters = f'&startDate={start_dt.strftime("%Y-%m-%d")}&endDate={end_dt.strftime("%Y-%m-%d")}' + subcal_par + para_markdown
req = requests.get(self._event_collection_url + parameters)
check_status_code(req.status_code)
self.events_json = json.loads(req.text)['events']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in self.events_json]
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(self.events_json)
else:
return self.events_json
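# Hedged usage sketch (not from the source; the calendar id and token below
# are placeholders): fetch one week of events as a DataFrame.
#
#   cal = Calendar('ks1234567', 'my-teamup-token')
#   events_df = cal.get_event_collection(
#       start_dt=datetime.date.today(),
#       end_dt=datetime.date.today() + datetime.timedelta(7),
#       returnas='dataframe')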
def _create_event_from_json(self, payload):
""" Lazy Creation of Event by passing a formatted payload"""
resp = requests.post(self._event_collection_url, data=payload, headers=POST_HEADERS)
try:
check_status_code(resp.status_code)
except:
print(payload)
print(resp.text)
raise
return resp.text
def get_event(self, event_id, returnas='event'):
if returnas not in ('event', 'series', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: event, series, dict')
url = self._base_url + EVENTS_BASE + f'/{event_id}' + self.__token_str
resp = requests.get(url)
check_status_code(resp.status_code)
event_dict = json.loads(resp.text)['event']
if returnas == 'event':
return Event(self, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
def get_subcalendar(self):
raise NotImplementedError
def search_events(self):
raise NotImplementedError
def get_changed_events(self, modified_since, returnas='event'):
"""
Get changed events since given unix time
:param modified_since: <int> Unix timestamp, must be less than 30 days old
:param returnas: <str> `events`, `dataframe`, or `dict` are valid options
:return: Tuple of event list and returned timestamp
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
url = self._base_url + EVENTS_BASE + self.__token_str + '&modifiedSince=' + str(modified_since)
resp = requests.get(url)
check_status_code(resp.status_code)
events_json = json.loads(resp.text)['events']
timestamp = json.loads(resp.text)['timestamp']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in events_json], timestamp
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(events_json)  # completion (api: pandas.DataFrame.from_records)
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
ret = ts.pct_change()
settings.returns['year_freq'] = '252 days' # same as empyrical
seed = 42
np.random.seed(seed)
benchmark_rets = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.wrapper.freq == day_dt
assert ret['a'].vbt.returns.wrapper.freq == day_dt
assert ret.vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.wrapper.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a']).obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts).obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days')  # completion (api: pandas.DataFrame.vbt.returns.from_price)
import re
import numpy as np
import pandas as pd
from arc._common import prob_metric_cal
import matchzoo as mz
from arc.anmm_impl import anmm_train
from arc.arci_impl import arci_train
from arc.arcii_impl import arcii_train
from arc.bimpm_impl import bimpm_train
from arc.cdssm_impl import cdssm_train
from arc.conv_knrm_impl import conv_knrm_train
from arc.diin_impl import diin_train
from arc.drmm_impl import drmm_train
from arc.drmmtks_impl import drmmtks_train
from arc.dssm_impl import dssm_train
from arc.duet_impl import duet_train
from arc.esim_impl import esim_train
from arc.hbmp_impl import hbmp_train
from arc.knrm_impl import knrm_train
from arc.match_lstm_impl import match_lstm_train
from arc.match_pyramid_impl import match_pyramid_train
from arc.match_srnn_impl import match_srnn_train
from arc.mv_lstm_impl import mv_lstm_train
from utils.util_params import arc_params_control
def trans_text(str_data_list):
res = []
for str_data in str_data_list:
str_list = re.findall('\d+', str_data)
num_list = list(map(int, str_list))
num_arr = np.array(num_list, dtype=np.float32)
res.append(num_arr)
print('Shape of text: ', np.array(res).shape)
return res
def trans_ngram(str_data_list, ngram=3):
res = []
for str_data in str_data_list:
str_list = re.findall('\d+', str_data)
num_list = list(map(int, str_list))
num_arr = []
for i in range(len(num_list)):
if i < len(num_list) - ngram + 1:
gram = num_list[i: i + ngram]
else:
gram = num_list[i: len(num_list)] + [0] * (ngram - (len(num_list) - i))
num_arr.append(gram)
res.append(np.array(num_arr, dtype=np.float))
print('Shape of n-gram: ', np.array(res).shape)
return res
def trans_hist(str_data_list_left, str_data_list_right, bin_size):
res_left = trans_ngram(str_data_list_left, 5)
res_right = trans_ngram(str_data_list_right, 5)
res_len = len(res_right[0])
for left_text, right_text in zip(res_left, res_right):
for i in range(res_len):
score_list = []
for j in range(res_len):
score = np.dot(left_text[i], right_text[j]) / (np.linalg.norm(left_text[i]) * (np.linalg.norm(right_text[j])))
score_list.append(score)
# print('Shape of n-gram: ', np.array(res).shape)
# return res
def trans_pd(file_name, arc, params):
pd_data = pd.read_csv(file_name)
id_left_list = pd_data['id_left'].values
text_left_list = trans_text(pd_data['text_left'].values)
length_left_list = list(map(int, pd_data['length_left'].values))
id_right_list = pd_data['id_right'].values
text_right_list = trans_text(pd_data['text_right'].values)
length_right_list = list(map(int, pd_data['length_right'].values))
label_list = list(map(float, pd_data['label'].values))
if arc == 'dssm':
# ngram_left_list = trans_ngram(pd_data['text_left'].values, params['ngram'])
# ngram_right_list = trans_ngram(pd_data['text_right'].values, params['ngram'])
data = {'id_left': pd.Series(id_left_list)  # completion (api: pandas.Series)
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 20 01:10:22 2021
@author: mohit
"""
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.decomposition import PCA
import prince
#%%
df = pd.read_csv('stackoverflow_data.csv')  # completion (api: pandas.read_csv)